#include <linux/highmem.h>

/* Lowmem pages are permanently mapped, so kmap() only does real work
 * (and may sleep) for pages above highmem_start_page. */
void *kmap(struct page *page)
{
	might_sleep();
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();
	if (page < highmem_start_page)
		return page_address(page);

	/* Each CPU gets its own bank of KM_TYPE_NR fixmap slots. */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte - idx)))
		BUG();
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void *) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
		BUG();

	/*
	 * Force other mappings to Oops if they'll try to access
	 * this pte without first remapping it.
	 */
	pte_clear(kmap_pte - idx);
	__flush_tlb_one(vaddr);
#endif
	dec_preempt_count();
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
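
/*
 * A minimal usage sketch (not part of the file above; the helper names
 * are hypothetical): zeroing a page that may live in highmem.
 * kmap_atomic() suits short, non-sleeping paths, since it pins the
 * mapping to a per-CPU fixmap slot with preemption disabled; kmap()
 * suits process context, where sleeping while waiting for a free kmap
 * slot is acceptable.
 */
#include <linux/highmem.h>
#include <linux/string.h>

/* Atomic context: preemption stays disabled for the map's lifetime,
 * so nothing between map and unmap may sleep. */
static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}

/* Process context: kmap() may sleep, so this must not be called from
 * interrupt context or while holding spinlocks. */
static void zero_page_sleepable(struct page *page)
{
	void *vaddr = kmap(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}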