// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 *  This handles the mini data cache, as found on SA11x0 and XScale
 *  processors.  When we copy a user page, we map it in such a way
 *  that accesses to this page will not touch the main data cache, but
 *  will be cached in the mini data cache.  This prevents us thrashing
 *  the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile ("\
	.arch xscale					\n\
	pld	[%0, #0]				\n\
	pld	[%0, #32]				\n\
	pld	[%1, #0]				\n\
	pld	[%1, #32]				\n\
1:	pld	[%0, #64]				\n\
	pld	[%0, #96]				\n\
	pld	[%1, #64]				\n\
	pld	[%1, #96]				\n\
2:	ldrd	r2, r3, [%0], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	%2, %2, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b					\n\
	beq	2b					"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &src->flags))
		__flush_dcache_folio(folio_flush_mapping(src), src);

	raw_spin_lock(&minicache_lock);

	/*
	 * Map the source page at COPYPAGE_MINICACHE with the mini-data-cache
	 * memory type, so the copy's reads are cached there rather than in
	 * the main D-cache.
	 */
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);

	asm volatile("\
	.arch xscale					\n\
	mov	r1, %2					\n\
	mov	r2, #0					\n\
	mov	r3, #0					\n\
1:	mov	ip, %0					\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");

	kunmap_atomic(kaddr);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
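/*
 * For context, a minimal sketch of how a table like xscale_mc_user_fns is
 * consumed.  On a MULTI_USER kernel, the boot code copies the user-fns
 * pointer from the selected processor info entry into a global cpu_user
 * table, and arch/arm/include/asm/page.h dispatches the generic page-copy
 * helpers through it.  The macro names below follow that header as I
 * understand it and may differ between kernel versions; the block is kept
 * out of the build since the real definitions live in <asm/page.h>.
 */
#if 0	/* illustrative sketch of the <asm/page.h> dispatch, not compiled */
extern struct cpu_user_fns cpu_user;	/* filled in during processor setup */

#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage

/* generic callers (e.g. the COW fault path) use these wrappers */
#define clear_user_highpage(page, vaddr) \
	__cpu_clear_user_highpage(page, vaddr)
#define copy_user_highpage(to, from, vaddr, vma) \
	__cpu_copy_user_highpage(to, from, vaddr, vma)
#endif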