/*
 *  linux/include/asm-arm/proc-armv/cache.h
 *
 *  Copyright (C) 1999-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/mman.h>

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 * Cache handling for 32-bit ARM processors.
 *
 * Note that on ARM, we have a more accurate specification than that
 * of Linux's "flush".  We therefore do not use "flush" here, but
 * instead use:
 *
 * clean:      the act of pushing dirty cache entries out to memory.
 * invalidate: the act of discarding data held within the cache,
 *             whether it is dirty or not.
 */

/*
 * Generic I + D cache
 */
#define flush_cache_all() \
	do { \
		cpu_cache_clean_invalidate_all(); \
	} while (0)

/* This is always called for current->mm */
#define flush_cache_mm(_mm) \
	do { \
		if ((_mm) == current->active_mm) \
			cpu_cache_clean_invalidate_all(); \
	} while (0)

#define flush_cache_range(_mm,_start,_end) \
	do { \
		if ((_mm) == current->mm) \
			cpu_cache_clean_invalidate_range((_start), (_end), 1); \
	} while (0)

#define flush_cache_page(_vma,_vmaddr) \
	do { \
		if ((_vma)->vm_mm == current->mm) { \
			cpu_cache_clean_invalidate_range((_vmaddr), \
				(_vmaddr) + PAGE_SIZE, \
				((_vma)->vm_flags & VM_EXEC)); \
		} \
	} while (0)
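/*
 * Illustrative sketch only -- not part of the original header.  The
 * flush_cache_*() macros above operate on virtual addresses, so callers
 * are expected to clean/invalidate while the old translations are still
 * in place, then rewrite the page tables, and only then invalidate the
 * TLB (see the flush_tlb_*() macros further down).  The function and the
 * example_clear_page_tables() helper below are invented purely to show
 * that ordering.
 */
#if 0
static void example_change_user_range(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	flush_cache_range(mm, start, end);	/* old mapping still live */
	example_clear_page_tables(mm, start, end);	/* hypothetical PTE rewrite */
	flush_tlb_range(mm, start, end);	/* drop the stale translations */
}
#endif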
/*
 * This flushes back any buffered write data.  We have to clean the entries
 * in the cache for this page.  This does not invalidate either I or D caches.
 *
 * Called from:
 * 1. mm/filemap.c:filemap_nopage
 * 2. mm/filemap.c:filemap_nopage
 *    [via do_no_page - ok]
 *
 * 3. mm/memory.c:break_cow
 *    [copy_cow_page doesn't do anything to the cache; insufficient cache
 *     handling.  Need to add flush_dcache_page() here]
 *
 * 4. mm/memory.c:do_swap_page
 *    [read_swap_cache_async doesn't do anything to the cache: insufficient
 *     cache handling.  Need to add flush_dcache_page() here]
 *
 * 5. mm/memory.c:do_anonymous_page
 *    [zero page, never written by kernel - ok]
 *
 * 6. mm/memory.c:do_no_page
 *    [we will be calling update_mmu_cache, which will catch on PG_dcache_dirty]
 *
 * 7. mm/shmem.c:shmem_nopage
 * 8. mm/shmem.c:shmem_nopage
 *    [via do_no_page - ok]
 *
 * 9. fs/exec.c:put_dirty_page
 *    [we call flush_dcache_page prior to this, which will flush out the
 *     kernel virtual addresses from the dcache - ok]
 */
static __inline__ void flush_page_to_ram(struct page *page)
{
	cpu_flush_ram_page(page_address(page));
}

/*
 * D cache only
 */
#define invalidate_dcache_range(_s,_e)	cpu_dcache_invalidate_range((_s),(_e))
#define clean_dcache_range(_s,_e)	cpu_dcache_clean_range((_s),(_e))
#define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page->mapping == NULL), or it has
 * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
 * then we _must_ always clean + invalidate the dcache entries associated
 * with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !(page->mapping->i_mmap) &&
	    !(page->mapping->i_mmap_shared))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long virt = (unsigned long)page_address(page);
		cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
	}
}

#define clean_dcache_entry(_s)	cpu_dcache_clean_entry((unsigned long)(_s))
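/*
 * Illustrative sketch only -- not part of the original header.  A caller
 * that has just written a page cache page through its kernel mapping calls
 * flush_dcache_page(): either the clean is deferred by setting
 * PG_dcache_dirty (picked up later by update_mmu_cache), or the kernel
 * alias is cleaned + invalidated immediately.  The function name and the
 * buf/len parameters are made up for the example.
 */
#if 0
static void example_fill_pagecache_page(struct page *page,
					const char *buf, size_t len)
{
	/* dirty the D cache via the kernel mapping of the page */
	memcpy((void *)page_address(page), buf, len);

	/* defer (PG_dcache_dirty) or clean+invalidate the kernel alias */
	flush_dcache_page(page);
}
#endif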
/*
 * I cache only
 */
#define flush_icache_range(_s,_e) \
	do { \
		cpu_icache_invalidate_range((_s), (_e)); \
	} while (0)

/*
 * This function is misnamed IMHO.  There are three places where it
 * is called, each of which is preceded immediately by a call to
 * flush_page_to_ram:
 *
 * 1. kernel/ptrace.c:access_one_page
 *    called after we have written to the kernel view of a user page.
 *    The user page has been expunged from the cache by flush_cache_page.
 *    [we don't need to do anything here if we add a call to
 *     flush_dcache_page]
 *
 * 2. mm/memory.c:do_swap_page
 *    called after we have (possibly) written to the kernel view of a
 *    user page, which has previously been removed (i.e., has been through
 *    the swap cache).
 *    [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 * 3. mm/memory.c:do_no_page
 *    [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 * Invalidating the icache at the kernel's virtual page isn't really
 * going to do us much good, since we wouldn't have executed any
 * instructions there.
 */
#define flush_icache_page(vma,pg)	do { } while (0)

/*
 * Old ARM MEMC stuff.  This supports the reversed mapping handling that
 * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
 */
#define memc_update_all()		do { } while (0)
#define memc_update_mm(mm)		do { } while (0)
#define memc_update_addr(mm,pte,log)	do { } while (0)
#define memc_clear(mm,physaddr)		do { } while (0)

/*
 * TLB flushing.
 *
 *  - flush_tlb_all()			flushes all processes' TLBs
 *  - flush_tlb_mm(mm)			flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr)	flushes TLB for specified page
 *  - flush_tlb_range(mm, start, end)	flushes TLB for specified range of pages
 *
 * We drain the write buffer in here to ensure that the page tables in ram
 * are really up to date.  It is more efficient to do this here...
 */

/*
 * Notes:
 *  current->active_mm is the currently active memory descriptor.
 *  current->mm == NULL iff we are lazy.
 */
#define flush_tlb_all() \
	do { \
		cpu_tlb_invalidate_all(); \
	} while (0)

/*
 * Flush all user virtual address space translations described by `_mm'.
 *
 * Currently, this is always called for current->mm, which should be
 * the same as current->active_mm.  It is not called for the lazy TLB
 * case.
 */
#define flush_tlb_mm(_mm) \
	do { \
		if ((_mm) == current->active_mm) \
			cpu_tlb_invalidate_all(); \
	} while (0)

/*
 * Flush the specified range of user virtual address space translations.
 *
 * _mm need not be current->active_mm, but must not be NULL.
 */
#define flush_tlb_range(_mm,_start,_end) \
	do { \
		if ((_mm) == current->active_mm) \
			cpu_tlb_invalidate_range((_start), (_end)); \
	} while (0)

/*
 * Flush the specified user virtual address space translation.
 */
#define flush_tlb_page(_vma,_page) \
	do { \
		if ((_vma)->vm_mm == current->active_mm) \
			cpu_tlb_invalidate_page((_page), \
				((_vma)->vm_flags & VM_EXEC)); \
	} while (0)

/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
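/*
 * Illustrative sketch only -- the real update_mmu_cache() lives elsewhere
 * in arch/arm/mm/, not in this header, and the exact validity checks below
 * are a guess.  The idea is that it completes the clean which
 * flush_dcache_page() deferred: if PG_dcache_dirty is still set when the
 * user mapping is established, clean + invalidate the kernel alias of the
 * page so user space sees the written data.
 */
#if 0
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t pte)
{
	struct page *page = pte_page(pte);

	if (VALID_PAGE(page) && page->mapping &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long virt = (unsigned long)page_address(page);

		/* push the kernel alias out to ram before user space runs */
		cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
	}
}
#endif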