/*
 * linux/arch/m32r/mm/ioremap.c
 *
 * Copyright (c) 2001, 2002  Hiroyuki Kondo
 *
 * Taken from mips version.
 *    (C) Copyright 1995 1996 Linus Torvalds
 *    (C) Copyright 2001 Ralf Baechle
 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/pgalloc.h>

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))

void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map objects in the low 512MB of address space using KSEG1, otherwise
	 * map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
		return (void __iomem *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
			  | _PAGE_WRITE | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void iounmap(volatile void __iomem *addr)
{
	if (!IS_KSEG1(addr))
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
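
/*
 * Example usage (a minimal sketch, not part of the original file): a
 * driver would map a device register window with __ioremap() and
 * release it with iounmap(). EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE and
 * example_probe() are hypothetical names with a made-up base address.
 * A base below 512MB would come straight back as a KSEG1 address with
 * no page-table mapping, so the address here is chosen above that
 * range to exercise the get_vm_area()/ioremap_page_range() path.
 */
#if 0	/* illustration only, never compiled */
#define EXAMPLE_REG_BASE	0x40000000UL	/* hypothetical device base */
#define EXAMPLE_REG_SIZE	0x100UL		/* hypothetical window size */

static int example_probe(void)
{
	void __iomem *regs = __ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE, 0);

	if (!regs)
		return -ENOMEM;

	writel(1, regs);	/* write the first 32-bit register */
	iounmap(regs);
	return 0;
}
#endif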