/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT	12
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PAGE_SHIFT	13
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT	14
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PAGE_SHIFT	15
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT	16
#endif
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		fallthrough;
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}
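
/*
 * Worked example, derived from the formulas above rather than taken
 * from the original source: with CONFIG_PAGE_SIZE_4KB, PAGE_SHIFT is
 * 12, so the VTLBSIZEEXT case returns (12 - 10) / 2 = 1; with
 * CONFIG_PAGE_SIZE_16KB (PAGE_SHIFT == 14) it returns (14 - 10) / 2 = 2.
 * In the FTLBSIZEEXT case, 1 GB and 4 GB pages get the dedicated
 * encodings 5 and 6, anything else above 256 kB is reserved (7), and
 * smaller sizes fall through to the same formula.
 */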

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET	ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)
#endif

extern void clear_page(void * page);
extern void copy_page(void * to, void * from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x)	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;

/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally the top of the hierarchy, the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

/*
 * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages.  When
 * writing to the TLB make sure we always have the bit set for both pages
 * or none.  This macro is used to access the `buddy' of the pte we're just
 * working on.
 */
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
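
/*
 * Illustrative sketch, not part of the original header: because pte
 * pairs are naturally aligned, XORing the pointer with sizeof(pte_t)
 * toggles exactly the address bit that separates the even pte from the
 * odd one, so applying ptep_buddy() twice gets back the original
 * pointer.  For instance, with sizeof(pte_t) == 4:
 *
 *	pte_t *even = (pte_t *)0x1000;
 *	pte_t *odd = ptep_buddy(even);	yields (pte_t *)0x1004
 *	ptep_buddy(odd);		yields (pte_t *)0x1000 again
 */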

/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, ie.
		 * the address x is going to be in kseg0 or kseg1.  We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>

/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) respectively 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).
 * The discussion can be found in
 * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
 *
 * It is unclear if the miscompilations mentioned in
 * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
 * also affect MIPS, so we keep this workaround until GCC 3.x has been
 * retired, at which point we can apply
 * https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol_nodebug(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)		__phys_addr_symbol((unsigned long)(x))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#ifdef CONFIG_FLATMEM

static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && pfn < max_mapnr;
}

#elif defined(CONFIG_SPARSEMEM)

/* pfn_valid is defined in linux/mmzone.h */

#elif defined(CONFIG_NUMA)

#define pfn_valid(pfn)							\
({									\
	unsigned long __pfn = (pfn);					\
	int __n = pfn_to_nid(__pfn);					\
	((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +	\
			       NODE_DATA(__n)->node_spanned_pages)	\
		    : 0);						\
})

#endif

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)						\
	__virt_addr_valid((const volatile void *) (kaddr))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */
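
/*
 * Illustrative round trip under assumed values (PAGE_OFFSET ==
 * 0x80000000, PHYS_OFFSET == 0, 4 kB pages; none of these are fixed by
 * this header): for the kseg0 address 0x80001000, __pa() yields
 * physical 0x1000, virt_to_pfn() yields pfn 1, and pfn_to_kaddr(1)
 * maps back to 0x80001000.  On a FLATMEM configuration pfn_valid(1)
 * then simply checks ARCH_PFN_OFFSET <= 1 < max_mapnr.
 */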