/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

/*
 * Fill the PTEs covering [address, address + size) within a single PMD
 * entry, mapping them to consecutive physical pages starting at phys_addr.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

/*
 * Allocate and fill the PTE tables for the PMD entries covering one PGD
 * slot's worth of the region.
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}

/*
 * Walk the kernel page tables from the PGD down, building a mapping of
 * size bytes at address onto the given physical range.
 */
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();

	return error;
}
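/*
 * The walk above follows the generic three-level page-table layout:
 * pgd_offset() picks the top-level slot for a kernel virtual address,
 * pud_alloc()/pmd_alloc() populate the middle levels on demand, and
 * remap_area_pte() writes the leaf entries.  A minimal sketch of the
 * leaf arithmetic, assuming the usual 4kB pages (PAGE_SHIFT == 12):
 *
 *	pfn = phys_addr >> PAGE_SHIFT;		physical frame number
 *	set_pte(pte, pfn_pte(pfn, pgprot));	frame + protection bits
 *
 * Each level clips `end' to its own span (PMD_SIZE or PGDIR_SIZE), so
 * no loop writes past the subregion handed down to it.
 */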
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
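/*
 * Illustrative use of the interface above -- a minimal sketch only, not
 * taken from an in-tree driver.  The device base address and register
 * offset are hypothetical; real callers get them from bus or platform
 * resources and normally go through the generic ioremap()/iounmap()
 * wrappers rather than calling __ioremap()/__iounmap() directly:
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x1f000000, 0x1000, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);		(hypothetical control register)
 *	__iounmap(regs);
 *
 * Since 0x1f000000 lies in the low 512MB and the mapping is uncached,
 * this call takes the KSEG1 fast path: no page tables are touched and
 * the matching __iounmap() returns early.
 */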