/*
 *  arch/s390/mm/ioremap.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/extable.c"
 *    (C) Copyright 1995 1996 Linus Torvalds
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 */

#include <linux/vmalloc.h>
#include <asm/io.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                if (!pte_none(*pte))
                        printk("remap_area_pte: page already exists\n");
                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_DIRTY |
                                                             _PAGE_ACCESSED | flags)));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
        unsigned long size, unsigned long flags)
{
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        while (address < end) {
                pmd_t *pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (remap_area_pmd(pmd, address, end - address, phys_addr + address, flags))
                        return -ENOMEM;
                set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_all();
        return 0;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;

        if (phys_addr < virt_to_phys(high_memory))
                return phys_to_virt(phys_addr);
        if (phys_addr & ~PAGE_MASK)
                return NULL;
        size = PAGE_ALIGN(size);
        if (!size || size > phys_addr + size)
                return NULL;
        area = get_vm_area(size);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

void iounmap(void *addr)
{
        if (addr > high_memory)
                return vfree(addr);
}
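
A minimal usage sketch, not part of the original file: it shows how a driver might map a device's register window through the __ioremap()/iounmap() pair defined above. EXAMPLE_PHYS_BASE, EXAMPLE_SIZE, and example_probe are hypothetical names chosen only for illustration; a real driver would normally go through the ioremap() wrapper in <asm/io.h> and take the address and size from its bus configuration.

#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_PHYS_BASE 0x80000000UL  /* hypothetical device address */
#define EXAMPLE_SIZE      0x1000UL      /* hypothetical window size, one page */

static int example_probe(void)
{
        void *regs;

        /* flags == 0: remap_area_pte() already adds PRESENT/DIRTY/ACCESSED */
        regs = __ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE, 0);
        if (!regs)
                return -ENOMEM;     /* no vm area or page-table allocation failed */

        /* ... access the window here, e.g. via readl()/writel() ... */

        /* only mappings above high_memory were actually vmalloc'ed; iounmap()
         * checks for that itself, so it is safe to call unconditionally */
        iounmap(regs);
        return 0;
}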