/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>

#define MAX_ASID_BITS	16

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
	asm(
	"	msr	contextidr_el1, %0\n"
	"	isb"
	:
	: "r" (task_pid_nr(next)));
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0			// set TTBR0\n"
	"	isb"
	:
	: "r" (ttbr));
}

static inline void switch_new_context(struct mm_struct *mm)
{
	unsigned long flags;

	__new_context(mm);

	local_irq_save(flags);
	cpu_switch_mm(mm->pgd, mm);
	local_irq_restore(flags);
}

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
		/*
		 * The ASID is from the current generation, just switch to the
		 * new pgd. This condition is only true for calls from
		 * context_switch() and interrupts are already disabled.
		 */
		cpu_switch_mm(mm->pgd, mm);
	else if (irqs_disabled())
		/*
		 * Defer the new ASID allocation until after the context
		 * switch critical region since __new_context() cannot be
		 * called with interrupts disabled.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		/*
		 * That is a direct call to switch_mm() or activate_mm() with
		 * interrupts enabled and a new context.
		 */
		switch_new_context(mm);
}

#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
#define destroy_context(mm)		do { } while(0)

#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
		struct mm_struct *mm = current->mm;
		unsigned long flags;

		__new_context(mm);

		local_irq_save(flags);
		cpu_switch_mm(mm->pgd, mm);
		local_irq_restore(flags);
	}
}

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/* check for possible thread migration */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
		check_and_switch_context(next, tsk);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif
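/*
 * Illustrative sketch (not part of the kernel header above): a standalone,
 * user-space demonstration of the generation test used by
 * check_and_switch_context(). It assumes the layout implied above, where the
 * low MAX_ASID_BITS of context.id hold the hardware ASID and the remaining
 * upper bits act as a rollover generation counter; the helper name
 * same_generation() is introduced here purely for illustration.
 */
#include <stdio.h>

#define MAX_ASID_BITS	16

/* Mirrors !((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS). */
static int same_generation(unsigned int ctx_id, unsigned int last_asid)
{
	/* Equal generation bits cancel under XOR, so the shift yields 0. */
	return !((ctx_id ^ last_asid) >> MAX_ASID_BITS);
}

int main(void)
{
	/* Pretend the allocator is in generation 2, last ASID 0x42. */
	unsigned int cpu_last_asid = (2u << MAX_ASID_BITS) | 0x42;

	/* Same generation: only a pgd switch is needed. */
	printf("%d\n", same_generation((2u << MAX_ASID_BITS) | 0x07, cpu_last_asid));
	/* Stale generation: a new ASID must be allocated first. */
	printf("%d\n", same_generation((1u << MAX_ASID_BITS) | 0x07, cpu_last_asid));
	return 0;
}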