/*
** Stolen mostly from arch/parisc/kernel/pci-dma.c
**
** Helpers to walk kernel page tables and flip the cacheability bit
** on a range of kernel virtual addresses (see kernel_set_cachemode).
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/page.h>		/* get_order */

/* This arch spells flush_cache_all as flush_all_caches; alias it here. */
#undef flush_cache_all
#define flush_cache_all flush_all_caches

/* Callback invoked for each PTE visited by iterate_pages(). */
typedef void (*pte_iterator_t) (pte_t * pte, unsigned long arg);

#if 0
/* XXX This routine could be used with iterate_page() to replace
 * unmap_uncached_page() and save a little code space but I didn't
 * do that since I'm not certain whether this is the right path. -PB
 */
/* Dead code, kept for reference: clear a PTE and free the backing
 * page if it was present and within mem_map bounds. */
static void unmap_cached_pte(pte_t * pte, unsigned long arg)
{
	pte_t page = *pte;
	pte_clear(pte);
	if (!pte_none(page)) {
		if (pte_present(page)) {
			unsigned long map_nr = pte_pagenr(page);
			if (map_nr < max_mapnr)
				__free_page(mem_map + map_nr);
		} else {
			printk(KERN_CRIT
			       "Whee.. Swapped out page in kernel page table\n");
		}
	}
}
#endif

/* These two routines should probably check a few things... 
*/

/* Set the no-cache bit in a single kernel PTE. arg is unused. */
static void set_uncached(pte_t * pte, unsigned long arg)
{
	pte_val(*pte) |= _PAGE_NO_CACHE;
}

/* Clear the no-cache bit in a single kernel PTE. arg is unused. */
static void set_cached(pte_t * pte, unsigned long arg)
{
	pte_val(*pte) &= ~_PAGE_NO_CACHE;
}

/*
 * Apply op(pte, arg) to every PTE of this pmd that falls inside
 * [address, address + size), clamped to the pmd's PMD_SIZE window.
 * Bad pmd entries are reported and cleared; absent ones are skipped.
 */
static inline void
iterate_pte(pmd_t * pmd, unsigned long address, unsigned long size,
	    pte_iterator_t op, unsigned long arg)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	/* Work in pmd-relative offsets so the clamp below is simple. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		op(pte, arg);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}

/*
 * Walk the pmds of one pgd entry covering [address, address + size)
 * and hand each sub-range to iterate_pte(). Bad pgd entries are
 * reported and cleared; absent ones are skipped.
 */
static inline void
iterate_pmd(pgd_t * dir, unsigned long address,
	    unsigned long size, pte_iterator_t op, unsigned long arg)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	/* Work in pgd-relative offsets, clamped to this pgd's window. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		iterate_pte(pmd, address, end - address, op, arg);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

/*
 * Apply op(pte, arg) to every kernel PTE mapping [address, address + size).
 * Caches are flushed before the walk and the TLB is flushed after it, so
 * stale translations/cache lines cannot outlive a cacheability change.
 */
static void
iterate_pages(unsigned long address, unsigned long size,
	      pte_iterator_t op, unsigned long arg)
{
	pgd_t *dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		iterate_pmd(dir, address, end - address, op, arg);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
		/* 'address' wraps to 0 past the top of the address space;
		 * the extra test terminates the loop in that case. */
	} while (address && (address < end));
	flush_tlb_all();
}

/*
 * Switch the cacheability of the kernel mapping for [vaddr, vaddr + size).
 * 'what' is IOMAP_FULL_CACHING to re-enable caching or IOMAP_NOCACHE_SER
 * to disable it; anything else is logged and ignored.
 *
 * NOTE(review): flush_tlb_range() conventionally takes (mm, start, end);
 * passing 'size' as the third argument looks wrong unless this arch's
 * variant takes a length — confirm against the arch header. It is also
 * redundant with the flush_tlb_all() already done in iterate_pages().
 */
void kernel_set_cachemode(unsigned long vaddr, unsigned long size, int what)
{
	switch (what) {
	case IOMAP_FULL_CACHING:
		iterate_pages(vaddr, size, set_cached, 0);
		flush_tlb_range(&init_mm, vaddr, size);
		break;
	case IOMAP_NOCACHE_SER:
		iterate_pages(vaddr, size, set_uncached, 0);
		flush_tlb_range(&init_mm, vaddr, size);
		break;
	default:
		printk(KERN_CRIT
		       "kernel_set_cachemode mode %d not understood\n", what);
		break;
	}
}