#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
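/*
 * Illustrative sketch (not part of the original header) of how a caller
 * typically pairs a page-table update with a flush from the API above;
 * "vma", "addr", "ptep" and "new_pte" are hypothetical locals:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, new_pte);	// update the PTE
 *	flush_tlb_page(vma, addr);	// then drop the stale TLB entry
 *
 * For a span of pages, flush_tlb_range(vma, start, end) amortizes the
 * cost over the whole range instead of issuing one flush per page.
 */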

#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */
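
/*
 * Illustrative sketch (not part of the original header) of how the SMP
 * cpu_tlbstate machinery above is typically consulted by the remote-flush
 * IPI handler; the function name "handle_flush_ipi" is hypothetical:
 *
 *	static void handle_flush_ipi(struct mm_struct *mm)
 *	{
 *		// Flush only if this CPU is actively using "mm"; a CPU in
 *		// TLBSTATE_LAZY skips the flush here and instead pays the
 *		// cost on its next context switch.
 *		if (this_cpu_read(cpu_tlbstate.active_mm) == mm &&
 *		    this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 *			local_flush_tlb();
 *	}
 */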