#include <linux/highmem.h>
#include <linux/module.h>

/*
 * Map a highmem page into the kernel's permanent kmap area.  May sleep
 * waiting for a free slot, so this must only be called from process context.
 */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

/*
 * Release a mapping taken with kmap().  Must not be called from
 * interrupt context.
 */
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * Sanity-check that the km_type slot a caller passed in matches the
 * context it is running in (hard irq, softirq or process), and that
 * irq-time slots are only used with interrupts disabled.  At most ten
 * warnings are emitted so a buggy caller cannot flood the log.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a
	 * bad idea also, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
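/*
 * Illustrative sketch only, not part of the original file: the short
 * map-touch-unmap pattern the comment above describes.  The helper name
 * example_clear_highpage() is hypothetical; KM_USER0 is the usual slot
 * for process-context users, and memset() comes via <linux/string.h>.
 * Nothing may sleep between kmap_atomic() and kunmap_atomic().
 */
static void example_clear_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* disables pagefaults */

	memset(vaddr, 0, PAGE_SIZE);		/* short, non-sleeping work only */
	kunmap_atomic(vaddr, KM_USER0);		/* tears the slot down again */
}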
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Translate an address obtained from kmap_atomic() back to its struct
 * page.  Addresses below the fixmap area are ordinary lowmem mappings.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
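/*
 * Illustrative sketch only, not part of the original file: how a caller
 * might use kmap_atomic_pfn() above to read a word from a physical frame
 * that has no struct page (a reserved or device frame, say).  The helper
 * name example_read_phys_word() and the choice of KM_USER0 are assumptions
 * made for the example; offset must stay below PAGE_SIZE.
 */
static u32 example_read_phys_word(unsigned long pfn, unsigned long offset)
{
	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *(u32 *)(vaddr + offset);	/* no sleeping before the unmap */

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}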