/*
 *  linux/arch/arm/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   26/01/1996	RMK	Cleaned up various areas to make little more generic
 *   07/02/1999	RMK	Support added for 16K and 32K page sizes
 *			containing 8K blocks
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>

#include <asm/bitops.h>
#include <asm/pgtable.h>

#define PEDANTIC

/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The Linux memory
 *  management system will then ignore the "index", "next_hash" and
 *  "pprev_hash" entries in the mem_map for this page.
 *
 *  We then use a bitstring in the "index" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose using
 *  the "next_hash" and "pprev_hash" entries of mem_map;
 */

struct order {
	struct page *queue;
	unsigned int mask;		/* (1 << shift) - 1		*/
	unsigned int shift;		/* (1 << shift) size of page	*/
	unsigned int block_mask;	/* nr_blocks - 1		*/
	unsigned int all_used;		/* (1 << nr_blocks) - 1		*/
};

static struct order orders[] = {
#if PAGE_SIZE == 4096
	{ NULL, 2047, 11,  1, 0x00000003 }
#elif PAGE_SIZE == 32768
	{ NULL, 2047, 11, 15, 0x0000ffff },
	{ NULL, 8191, 13,  3, 0x0000000f }
#else
#error unsupported page size
#endif
};

#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

static spinlock_t small_page_lock = SPIN_LOCK_UNLOCKED;

/*
 * Link a partially used page onto the head of the queue for its order.
 */
static void add_page_to_queue(struct page *page, struct page **p)
{
#ifdef PEDANTIC
	if (page->pprev_hash)
		PAGE_BUG(page);
#endif
	page->next_hash = *p;
	if (*p)
		(*p)->pprev_hash = &page->next_hash;
	*p = page;
	page->pprev_hash = p;
}

/*
 * Unlink a page from whichever queue it is on, if any.
 */
static void remove_page_from_queue(struct page *page)
{
	if (page->pprev_hash) {
		if (page->next_hash)
			page->next_hash->pprev_hash = page->pprev_hash;
		*page->pprev_hash = page->next_hash;
		page->pprev_hash = NULL;
	}
}

/*
 * Allocate one block from a page on this order's queue, taking a fresh
 * page from the page allocator if the queue is empty.  Returns the
 * virtual address of the block, or 0 on failure.
 */
static unsigned long __get_small_page(int priority, struct order *order)
{
	unsigned long flags;
	struct page *page;
	int offset;

	if (!order->queue)
		goto need_new_page;

	spin_lock_irqsave(&small_page_lock, flags);
	page = order->queue;
again:
#ifdef PEDANTIC
	if (USED_MAP(page) & ~order->all_used)
		PAGE_BUG(page);
#endif
	offset = ffz(USED_MAP(page));
	SET_USED(page, offset);
	if (USED_MAP(page) == order->all_used)
		remove_page_from_queue(page);
	spin_unlock_irqrestore(&small_page_lock, flags);

	return (unsigned long) page_address(page) + (offset << order->shift);

need_new_page:
	page = alloc_page(priority);

	spin_lock_irqsave(&small_page_lock, flags);
	if (!order->queue) {
		if (!page)
			goto no_page;
		SetPageReserved(page);
		USED_MAP(page) = 0;
		cli();
		add_page_to_queue(page, &order->queue);
	} else {
		__free_page(page);
		cli();
		page = order->queue;
	}
	goto again;

no_page:
	spin_unlock_irqrestore(&small_page_lock, flags);
	return 0;
}

/*
 * Free one block.  When the last used block in a page is freed, the page
 * is unreserved and handed back to the page allocator.
 */
static void __free_small_page(unsigned long spage, struct order *order)
{
	unsigned long flags;
	struct page *page;

	page = virt_to_page(spage);
	if (VALID_PAGE(page)) {
		/*
		 * The container-page must be marked Reserved
		 */
		if (!PageReserved(page) || spage & order->mask)
			goto non_small;

#ifdef PEDANTIC
		if (USED_MAP(page) & ~order->all_used)
			PAGE_BUG(page);
#endif
		spage = spage >> order->shift;
		spage &= order->block_mask;

		/*
		 * the following must be atomic wrt get_page
		 */
		spin_lock_irqsave(&small_page_lock, flags);

		if (USED_MAP(page) == order->all_used)
			add_page_to_queue(page, &order->queue);

		if (!TEST_AND_CLEAR_USED(page, spage))
			goto already_free;

		if (USED_MAP(page) == 0)
			goto free_page;

		spin_unlock_irqrestore(&small_page_lock, flags);
	}
	return;

free_page:
	/*
	 * unlink the page from the small page queue and free it
	 */
	remove_page_from_queue(page);
	spin_unlock_irqrestore(&small_page_lock, flags);
	ClearPageReserved(page);
	__free_page(page);
	return;

non_small:
	printk("Trying to free non-small page from %p\n",
	       __builtin_return_address(0));
	return;

already_free:
	printk("Trying to free free small page from %p\n",
	       __builtin_return_address(0));
}

unsigned long get_page_2k(int priority)
{
	return __get_small_page(priority, orders+0);
}

void free_page_2k(unsigned long spage)
{
	__free_small_page(spage, orders+0);
}

#if PAGE_SIZE > 8192
unsigned long get_page_8k(int priority)
{
	return __get_small_page(priority, orders+1);
}

void free_page_8k(unsigned long spage)
{
	__free_small_page(spage, orders+1);
}
#endif
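
/*
 * Usage sketch (editor's addition, not part of the original file): as the
 * "Requirement" comment above notes, these blocks are typically used for
 * second level page tables on 32-bit ARMs.  A caller obtains a naturally
 * aligned 2K block with get_page_2k() and releases it with free_page_2k();
 * the "priority" argument is passed straight through to alloc_page(), so the
 * usual GFP flags apply.  The two functions below are hypothetical and only
 * illustrate how the calls pair up; they are compiled out with #if 0.
 */
#if 0
static unsigned long example_alloc_2k_table(void)
{
	unsigned long table;

	/* GFP_KERNEL may sleep; use GFP_ATOMIC from interrupt context. */
	table = get_page_2k(GFP_KERNEL);
	if (!table)
		return 0;

	/* The block is 2K-aligned; the allocator does not zero it for us. */
	memset((void *) table, 0, 2048);
	return table;
}

static void example_free_2k_table(unsigned long table)
{
	/* Freeing the last block in a page returns the page to the kernel. */
	free_page_2k(table);
}
#endif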