// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>


static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		WARN_ON_ONCE(!CPU_IS_040_OR_060);
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_user_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;

		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		unsigned long tmp;

		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}

void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	set_fc(SUPER_DATA);
	flush_icache_user_range(address, endaddr);
	set_fc(USER_DATA);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;

		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;

		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}