/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CPU_CP15
	mov	ip, #0
	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(v4_flush_kern_dcache_page)
	/* FALLTHROUGH */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_inv_range)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
#ifdef CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	/* FALLTHROUGH */

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_clean_range)
	mov	pc, lr

	__INITDATA

	.type	v4_cache_fns, #object
ENTRY(v4_cache_fns)
	.long	v4_flush_kern_cache_all
	.long	v4_flush_user_cache_all
	.long	v4_flush_user_cache_range
	.long	v4_coherent_kern_range
	.long	v4_coherent_user_range
	.long	v4_flush_kern_dcache_page
	.long	v4_dma_inv_range
	.long	v4_dma_clean_range
	.long	v4_dma_flush_range
	.size	v4_cache_fns, . - v4_cache_fns