/*
 * linux/include/asm-arm/pgalloc.h
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <linux/config.h>
#include <linux/threads.h>

#include <asm/processor.h>

/*
 * Get the cache handling stuff now.
 */
#include <asm/proc/cache.h>

extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm,
					  unsigned long start, unsigned long end)
{
}

/*
 * Page table cache stuff
 */
#ifndef CONFIG_NO_PGT_CACHE

#ifdef CONFIG_SMP
#error Pgtable caches have to be per-CPU, so that no locking is needed.
#endif	/* CONFIG_SMP */

extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

/* used for quicklists */
#define __pgd_next(pgd)		(((unsigned long *)pgd)[1])
#define __pte_next(pte)		(((unsigned long *)pte)[0])

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)__pgd_next(ret);
		ret[1] = ret[2];
		clean_cache_area(ret + 1, 4);
		pgtable_cache_size--;
	}
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	__pgd_next(pgd) = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

/* We don't use pmd cache, so this is a dummy routine */
#define get_pmd_fast()		((pmd_t *)0)

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)__pte_next(ret);
		ret[0] = ret[1];
		clean_cache_area(ret, 4);
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	__pte_next(pte) = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

#else	/* CONFIG_NO_PGT_CACHE */

#define pgd_quicklist		((unsigned long *)0)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		((unsigned long *)0)

#define get_pgd_fast()		((pgd_t *)0)
#define get_pmd_fast()		((pmd_t *)0)
#define get_pte_fast()		((pte_t *)0)

#define free_pgd_fast(pgd)	free_pgd_slow(pgd)
#define free_pmd_fast(pmd)	free_pmd_slow(pmd)
#define free_pte_fast(pte)	free_pte_slow(pte)

#endif	/* CONFIG_NO_PGT_CACHE */

extern pgd_t *get_pgd_slow(void);
extern void free_pgd_slow(pgd_t *pgd);

#define free_pmd_slow(pmd)	do { } while (0)

extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long addr_preadjusted);
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long addr_preadjusted);
extern void free_pte_slow(pte_t *pte);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)

#ifndef pte_alloc_kernel
extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_kernel_slow(pmd, address);
		set_pmd(pmd, mk_kernel_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
#endif

extern __inline__ pte_t *pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		set_pmd(pmd, mk_user_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

#define pmd_free_kernel		pmd_free
#define pmd_free(pmd)		do { } while (0)

#define pmd_alloc_kernel	pmd_alloc
extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pgd_free(pgd)		free_pgd_fast(pgd)

extern __inline__ pgd_t *pgd_alloc(void)
{
	pgd_t *pgd;

	pgd = get_pgd_fast();
	if (!pgd)
		pgd = get_pgd_slow();

	return pgd;
}

extern int do_check_pgt_cache(int, int);

extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm, address) = entry;
	}
	read_unlock(&tasklist_lock);

#ifndef CONFIG_NO_PGT_CACHE
	{
		pgd_t *pgd;
		for (pgd = (pgd_t *)pgd_quicklist; pgd;
		     pgd = (pgd_t *)__pgd_next(pgd))
			pgd[address >> PGDIR_SHIFT] = entry;
	}
#endif
}

#endif
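
For reference, here is a minimal sketch of how the allocators declared above fit together on this 2.4-era ARM interface: pgd_alloc()/pgd_free() manage whole page directories (served from the quicklist when one is cached), while the walk below uses pgd_offset() → pmd_alloc() → pte_alloc() to populate the entry covering one user address, with pte_alloc() falling back to get_pte_slow() when the pte quicklist is empty. The function name example_alloc_user_pte, the chosen includes, and the locking assumption are illustrative guesses and are not part of this header.

/*
 * Illustrative sketch only -- not part of the original header.
 * Assumes the usual 2.4 mm headers are available and that the caller
 * holds mm->page_table_lock around the table walk, as mm/memory.c does.
 */
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

static pte_t *example_alloc_user_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* slot in mm->pgd for address */
	pmd_t *pmd = pmd_alloc(pgd, address);	/* identity fold on 2-level ARM */

	if (!pmd)
		return NULL;

	/*
	 * Reuses a page table from the pte quicklist when one is cached,
	 * otherwise drops to get_pte_slow() to allocate and install one.
	 */
	return pte_alloc(pmd, address);
}

Because pmd_alloc() simply casts the pgd slot, the walk only ever touches two real levels, which is also why this header can leave pmd_free() as a no-op.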