/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)
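
/*
 * For reference, the sequence above is roughly equivalent to the following
 * C-level sketch. This is illustrative only: the helper names below
 * (dc_cvau(), dsb(), dcache_line_size() used this way) are hypothetical
 * shorthand for the instructions and alternatives the code actually emits.
 *
 *	int flush_cache_user_range_sketch(unsigned long start, unsigned long end)
 *	{
 *		unsigned long line = dcache_line_size();
 *		unsigned long addr;
 *
 *		if (!cpus_have_cap(ARM64_HAS_CACHE_IDC)) {
 *			// Clean each D-cache line to the PoU so the I-side
 *			// can observe the newly written instructions.
 *			for (addr = start & ~(line - 1); addr < end; addr += line)
 *				dc_cvau(addr);
 *			dsb(ish);
 *		} else {
 *			dsb(ishst);	// IDC: no clean needed, just order stores
 *		}
 *
 *		if (cpus_have_cap(ARM64_HAS_CACHE_DIC))
 *			isb();		// DIC: no I-cache invalidate needed
 *		else
 *			invalidate_icache_by_line(start, end);	// ic ivau; dsb; isb
 *
 *		return 0;	// -EFAULT if a user access faulted
 *	}
 */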

/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I cache is invalid within specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(invalidate_icache_range)

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_inv_area:
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pop)
	alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
	alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)

/*
 *	__dma_flush_area(start, size)
 *
 *	clean & invalidate D / U line
 *
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
ENDPIPROC(__dma_map_area)

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
ENDPIPROC(__dma_unmap_area)
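
/*
 * A C-level sketch of the direction dispatch above, for reference only.
 * The function names are hypothetical shorthand, not kernel API; the real
 * callers are the arch DMA sync hooks, which branch to the routines above.
 *
 *	void dma_map_area_sketch(unsigned long start, size_t size, int dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			__dma_inv_area(start, size);	// device will write:
 *							// drop stale CPU lines
 *		else
 *			__dma_clean_area(start, size);	// CPU wrote: push data
 *							// to PoC for the device
 *	}
 *
 *	void dma_unmap_area_sketch(unsigned long start, size_t size, int dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			__dma_inv_area(start, size);	// discard lines the CPU
 *							// may have speculated in
 *	}
 */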