#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>

/* TLB flush operations. */

extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
			      unsigned long r, unsigned long end,
			      unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page,
			     unsigned long r);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

#define flush_tlb_all()		__flush_tlb_all()
#define flush_tlb_kernel_range(start,end) \
	__flush_tlb_kernel_range(start,end)

#define flush_tlb_mm(__mm) \
do { if (CTX_VALID((__mm)->context)) \
	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while (0)

#define flush_tlb_range(__vma, start, end) \
do { if (CTX_VALID((__vma)->vm_mm->context)) { \
	unsigned long __start = (start) & PAGE_MASK; \
	unsigned long __end = PAGE_ALIGN(end); \
	__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
			  (__end - __start)); \
     } \
} while (0)

#define flush_tlb_vpte_range(__mm, start, end) \
do { if (CTX_VALID((__mm)->context)) { \
	unsigned long __start = (start) & PAGE_MASK; \
	unsigned long __end = PAGE_ALIGN(end); \
	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
			  (__end - __start)); \
     } \
} while (0)

#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
	if (CTX_VALID(__mm->context)) \
		__flush_tlb_page(CTX_HWBITS(__mm->context), (page) & PAGE_MASK, \
				 SECONDARY_CONTEXT); \
} while (0)

#define flush_tlb_vpte_page(mm, addr) \
do { struct mm_struct *__mm = (mm); \
	if (CTX_VALID(__mm->context)) \
		__flush_tlb_page(CTX_HWBITS(__mm->context), (addr) & PAGE_MASK, \
				 SECONDARY_CONTEXT); \
} while (0)

#else /* CONFIG_SMP */

extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end);
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);

#define flush_tlb_all()		smp_flush_tlb_all()
#define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
#define flush_tlb_range(vma, start, end) \
	smp_flush_tlb_range((vma)->vm_mm, start, end)
#define flush_tlb_vpte_range(mm, start, end) \
	smp_flush_tlb_range(mm, start, end)
#define flush_tlb_kernel_range(start, end) \
	smp_flush_tlb_kernel_range(start, end)
#define flush_tlb_page(vma, page) \
	smp_flush_tlb_page((vma)->vm_mm, page)
#define flush_tlb_vpte_page(mm, page) \
	smp_flush_tlb_page((mm), page)

#endif /* ! CONFIG_SMP */
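/* A typical caller (e.g. the generic mm code) invalidates one user
 * mapping with:
 *
 *	flush_tlb_page(vma, address);
 *
 * On UP this checks CTX_VALID() on the mm's context and, if valid,
 * calls __flush_tlb_page() against SECONDARY_CONTEXT; on SMP it
 * becomes a cross-CPU call via smp_flush_tlb_page().
 */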
static __inline__ void flush_tlb_pgtables(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	/* Note the signed type: addresses above the sparc64 VM hole are
	 * negative when viewed as longs, and the arithmetic shift below
	 * keeps them negative so they index below vpte_base.
	 */
	long s = start, e = end, vpte_base;

	/* Nobody should call us with start below the VM hole and end
	 * above it; a range straddling the hole shows up as s > e,
	 * so catch it if that ever happens.
	 */
	BUG_ON(s > e);

#if 0
	/* Currently free_pgtables() guarantees this.  */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;
#endif

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	/* Each PTE is 8 bytes, so the VPTE mapping of address A lives
	 * at vpte_base + (A >> PAGE_SHIFT) * 8, i.e.
	 * vpte_base + (A >> (PAGE_SHIFT - 3)).
	 */
	flush_tlb_vpte_range(mm,
			     vpte_base + (s >> (PAGE_SHIFT - 3)),
			     vpte_base + (e >> (PAGE_SHIFT - 3)));
}

#endif /* _SPARC64_TLBFLUSH_H */
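/* Worked example of the VPTE index arithmetic in flush_tlb_pgtables(),
 * assuming 8K pages (PAGE_SHIFT == 13): the VPTE for virtual address
 * 0x10000 sits at vpte_base + (0x10000 >> 13) * 8, i.e.
 * vpte_base + (0x10000 >> 10) == vpte_base + 0x40.
 */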