/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif
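
/*
 * Illustrative sketch, not part of the upstream header: how the hooks above
 * are expected to be used around a context switch on a non-ASID, VIVT CPU,
 * based on the comments in check_and_switch_context() and
 * finish_arch_post_lock_switch(). The function example_context_switch() and
 * its task pointers are hypothetical; the real caller is the scheduler core.
 * The block is guarded with #if 0 so it is never built.
 */
#if 0
static void example_context_switch(struct task_struct *prev_task,
				   struct task_struct *next_task)
{
	struct mm_struct *prev_mm = prev_task->active_mm;
	struct mm_struct *next_mm = next_task->mm;

	if (!next_mm) {
		/* Kernel thread: keep running on the previous mm lazily. */
		next_task->active_mm = prev_mm;
		enter_lazy_tlb(prev_mm, next_task);
	} else {
		/*
		 * Interrupts are disabled at this point, so on a non-ASID
		 * CPU check_and_switch_context() only sets
		 * context.switch_pending instead of calling cpu_switch_mm()
		 * (and flushing the VIVT caches) immediately.
		 */
		switch_mm(prev_mm, next_mm, next_task);
	}

	/* ... low-level register and stack switch happens here ... */

	/*
	 * Once the scheduler has dropped its locks, the deferred
	 * cpu_switch_mm() is completed; the old mm stays valid until then.
	 */
	finish_arch_post_lock_switch();
}
#endif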