1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _M68K_PGTABLE_H #define _M68K_PGTABLE_H #if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE) #include <asm-generic/pgtable-nopmd.h> #else #include <asm-generic/pgtable-nopud.h> #endif #include <asm/setup.h> #ifndef __ASSEMBLY__ #include <asm/processor.h> #include <linux/sched.h> #include <linux/threads.h> /* * This file contains the functions and defines necessary to modify and use * the m68k page table tree. */ #include <asm/virtconvert.h> /* Certain architectures need to do special things when pte's * within a page table are directly modified. Thus, the following * hook is made available. 
*/ #define set_pte(pteptr, pteval) \ do{ \ *(pteptr) = (pteval); \ } while(0) #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* PMD_SHIFT determines the size of the area a second-level page table can map */ #if CONFIG_PGTABLE_LEVELS == 3 #define PMD_SHIFT 18 #endif #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) /* PGDIR_SHIFT determines what a third-level page table entry can map */ #ifdef CONFIG_SUN3 #define PGDIR_SHIFT 17 #elif defined(CONFIG_COLDFIRE) #define PGDIR_SHIFT 22 #else #define PGDIR_SHIFT 25 #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) /* * entries per page directory level: the m68k is configured as three-level, * so we do have PMD level physically. */ #ifdef CONFIG_SUN3 #define PTRS_PER_PTE 16 #define __PAGETABLE_PMD_FOLDED 1 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 #elif defined(CONFIG_COLDFIRE) #define PTRS_PER_PTE 512 #define __PAGETABLE_PMD_FOLDED 1 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 1024 #else #define PTRS_PER_PTE 64 #define PTRS_PER_PMD 128 #define PTRS_PER_PGD 128 #endif #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) /* Virtual address region for use by kernel_map() */ #ifdef CONFIG_SUN3 #define KMAP_START 0x0dc00000 #define KMAP_END 0x0e000000 #elif defined(CONFIG_COLDFIRE) #define KMAP_START 0xe0000000 #define KMAP_END 0xf0000000 #elif defined(CONFIG_VIRT) #define KMAP_START 0xdf000000 #define KMAP_END 0xff000000 #else #define KMAP_START 0xd0000000 #define KMAP_END 0xf0000000 #endif #ifdef CONFIG_SUN3 extern unsigned long m68k_vmalloc_end; #define VMALLOC_START 0x0f800000 #define VMALLOC_END m68k_vmalloc_end #elif defined(CONFIG_COLDFIRE) #define VMALLOC_START 0xd0000000 #define VMALLOC_END 0xe0000000 #elif defined(CONFIG_VIRT) #define VMALLOC_OFFSET PAGE_SIZE #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END KMAP_START #else /* Just any arbitrary offset to the start of the vmalloc VM 
area: the * current 8MB value just means that there will be a 8MB "hole" after the * physical memory until the kernel virtual memory starts. That means that * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END KMAP_START #endif /* zero page used for uninitialized stuff */ extern void *empty_zero_page; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* number of bits that fit into a memory pointer */ #define BITS_PER_PTR (8*sizeof(unsigned long)) /* to align the pointer to a pointer address */ #define PTR_MASK (~(sizeof(void*)-1)) /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ /* 64-bit machines, beware! SRB. */ #define SIZEOF_PTR_LOG2 2 extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode); /* * The m68k doesn't have any external MMU info: the kernel page * tables contain all the necessary information. The Sun3 does, but * they are updated on demand. */ static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { } #endif /* !__ASSEMBLY__ */ /* MMU-specific headers */ #ifdef CONFIG_SUN3 #include <asm/sun3_pgtable.h> #elif defined(CONFIG_COLDFIRE) #include <asm/mcf_pgtable.h> #else #include <asm/motorola_pgtable.h> #endif #ifndef __ASSEMBLY__ /* * Macro to mark a page protection value as "uncacheable". */ #ifdef CONFIG_COLDFIRE # define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE)) #else #ifdef SUN3_PAGE_NOCACHE # define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE #else # define __SUN3_PAGE_NOCACHE 0 #endif #define pgprot_noncached(prot) \ (MMU_IS_SUN3 \ ? 
(__pgprot(pgprot_val(prot) | __SUN3_PAGE_NOCACHE)) \ : ((MMU_IS_851 || MMU_IS_030) \ ? (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE030)) \ : (MMU_IS_040 || MMU_IS_060) \ ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ : (prot))) pgprot_t pgprot_dmacoherent(pgprot_t prot); #define pgprot_dmacoherent(prot) pgprot_dmacoherent(prot) #endif /* CONFIG_COLDFIRE */ #endif /* !__ASSEMBLY__ */ #endif /* _M68K_PGTABLE_H */ |