// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}

static inline void book3e_tlb_lock(void)
{
	struct paca_struct *paca = get_paca();
	unsigned long tmp;
	int token = smp_processor_id() + 1;

	/*
	 * Besides being unnecessary in the absence of SMT, this
	 * check prevents trying to do lbarx/stbcx. on e5500 which
	 * doesn't implement either feature.
	 */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	asm volatile(".machine push;"
		     ".machine e6500;"
		     "1: lbarx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2f;"
		     "stbcx. %2, 0, %1;"
		     "bne 1b;"
		     "b 3f;"
		     "2: lbzx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2b;"
		     "b 1b;"
		     "3:"
		     ".machine pop;"
		     : "=&r" (tmp)
		     : "r" (&paca->tcd_ptr->lock), "r" (token)
		     : "memory");
}

static inline void book3e_tlb_unlock(void)
{
	struct paca_struct *paca = get_paca();

	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	isync();
	paca->tcd_ptr->lock = 0;
}
#else
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}

static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);
	asm volatile(
		"tlbsx	0,%1\n"
		"mfspr	%0,0x271\n"
		"srwi	%0,%0,31\n"
		: "=&r"(found) : "r"(ea));

	return found;
}

static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;
	int index;

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
	tsize = shift - 10;

	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no tlb exists.
	 */
	local_irq_save(flags);

	book3e_tlb_lock();

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		book3e_tlb_unlock();
		local_irq_restore(flags);
		return;
	}

	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
	mtspr(SPRN_MAS3, lower_32_bits(mas7_3));

	asm volatile ("tlbwe");

	book3e_tlb_unlock();
	local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}

void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}