/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003 by Ralf Baechle
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr, unsigned long bytes,
	unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
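/*
 * Illustrative sketch (assumes glibc's <sys/cachectl.h> wrapper; jit_emit()
 * and code_len are hypothetical): userspace typically reaches this syscall
 * through cacheflush(2) after generating code at runtime:
 *
 *	#include <sys/cachectl.h>
 *
 *	void *code = jit_emit();		// hypothetical code generator
 *	cacheflush(code, code_len, BCACHE);	// writeback dcache, invalidate icache
 *	((void (*)(void))code)();		// now safe to execute
 *
 * BCACHE requests both caches; since flush_icache_range() above already does
 * a combined writeback/invalidate, ICACHE and DCACHE requests take the same
 * path here, as the comment above sys_cacheflush() notes.
 */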
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
	    Page_dcache_dirty(page)) {
		if (pages_do_alias((unsigned long)page_address(page),
		                   address & PAGE_MASK)) {
			addr = (unsigned long) page_address(page);
			flush_data_cache_page(addr);
		}

		ClearPageDcacheDirty(page);
	}
}

#define __weak __attribute__((weak))

static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";

void __init cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
		return;
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
		return;
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
		return;
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
		return;
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
		return;
	}

	panic(cache_panic);
}
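/*
 * Illustrative sketch (a simplified assumption about the variant files, not
 * code copied from them): each configured cache variant, e.g.
 * arch/mips/mm/c-r4k.c, supplies the strong definition of its *_cache_init()
 * function and fills in the indirection pointers declared at the top of this
 * file, roughly:
 *
 *	void __init r4k_cache_init(void)
 *	{
 *		...
 *		flush_cache_all = r4k_flush_cache_all;
 *		flush_icache_range = r4k_flush_icache_range;
 *		flush_data_cache_page = r4k_flush_data_cache_page;
 *		...
 *	}
 *
 * The __weak declarations in cpu_cache_init() keep this file linkable when
 * only one variant object is built into the kernel; the cpu_has_* tests
 * guarantee that only the init function that was actually built gets called.
 */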