/*
 * linux/arch/arm/mm/small_page.c
 *
 * Copyright (C) 1996 Russell King
 *
 * Changelog:
 *  26/01/1996  RMK     Cleaned up various areas to make little more generic
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>

#define SMALL_ALLOC_SHIFT       (10)
#define SMALL_ALLOC_SIZE        (1 << SMALL_ALLOC_SHIFT)
#define NR_BLOCKS               (PAGE_SIZE / SMALL_ALLOC_SIZE)

#if NR_BLOCKS != 4
#error I only support 4 blocks per page!
#endif

/*
 * The four-bit "used" mask for a page lives in bits 8-11 of the page's
 * count field.  These macros treat atomic_read() as an lvalue, which it
 * was in kernels of this era (it expanded to the raw counter).
 */
#define USED(pg)                ((atomic_read(&(pg)->count) >> 8) & 15)
#define SET_USED(pg,off)        (atomic_read(&(pg)->count) |= 256 << off)
#define CLEAR_USED(pg,off)      (atomic_read(&(pg)->count) &= ~(256 << off))
#define IS_FREE(pg,off)         (!(atomic_read(&(pg)->count) & (256 << off)))
#define PAGE_PTR(page,block)    ((struct free_small_page *)((page) + \
                                ((block) << SMALL_ALLOC_SHIFT)))

/*
 * Each free 1K block of a partially used page holds the links of a
 * doubly-linked list of pages that still have free blocks.
 */
struct free_small_page {
        unsigned long next;
        unsigned long prev;
};

/*
 * To handle allocating small pages, we use the main get_free_page routine,
 * and split the page up into 4.  The page is marked in mem_map as reserved,
 * so it can't be free'd by free_page.  The count field is used to keep track
 * of which sections of this page are allocated.
 */
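/*
 * Worked example of the count encoding (illustrative, derived from the
 * macros above): with blocks 0 and 2 of a page handed out, bits 8-11 of
 * the count field hold binary 0101, so USED(page) == 5 and
 * offsets[5] == 1 selects block 1 for the next allocation from this page.
 */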
static unsigned long small_page_ptr;

/*
 * offsets[mask] gives the index of the first free (zero) bit in a
 * four-bit used mask; 4 means the page is fully allocated.
 */
static unsigned char offsets[1<<NR_BLOCKS] = {
        0,      /* 0000 */
        1,      /* 0001 */
        0,      /* 0010 */
        2,      /* 0011 */
        0,      /* 0100 */
        1,      /* 0101 */
        0,      /* 0110 */
        3,      /* 0111 */
        0,      /* 1000 */
        1,      /* 1001 */
        0,      /* 1010 */
        2,      /* 1011 */
        0,      /* 1100 */
        1,      /* 1101 */
        0,      /* 1110 */
        4       /* 1111 */
};

static inline void clear_page_links(unsigned long page)
{
        struct free_small_page *fsp;
        int i;

        for (i = 0; i < NR_BLOCKS; i++) {
                fsp = PAGE_PTR(page, i);
                fsp->next = fsp->prev = 0;
        }
}

/*
 * Update the prev link stored in every still-free block of "page".
 */
static inline void set_page_links_prev(unsigned long page, unsigned long prev)
{
        struct free_small_page *fsp;
        unsigned int mask;
        int i;

        if (!page)
                return;

        mask = USED(&mem_map[MAP_NR(page)]);
        for (i = 0; i < NR_BLOCKS; i++) {
                if (mask & (1 << i))
                        continue;
                fsp = PAGE_PTR(page, i);
                fsp->prev = prev;
        }
}

/*
 * Update the next link stored in every still-free block of "page".
 */
static inline void set_page_links_next(unsigned long page, unsigned long next)
{
        struct free_small_page *fsp;
        unsigned int mask;
        int i;

        if (!page)
                return;

        mask = USED(&mem_map[MAP_NR(page)]);
        for (i = 0; i < NR_BLOCKS; i++) {
                if (mask & (1 << i))
                        continue;
                fsp = PAGE_PTR(page, i);
                fsp->next = next;
        }
}

unsigned long get_small_page(int priority)
{
        struct free_small_page *fsp;
        unsigned long new_page;
        unsigned long flags;
        struct page *page;
        int offset;

        save_flags(flags);
        if (!small_page_ptr)
                goto need_new_page;
        cli();
again:
        page = mem_map + MAP_NR(small_page_ptr);
        offset = offsets[USED(page)];
        SET_USED(page, offset);
        new_page = (unsigned long)PAGE_PTR(small_page_ptr, offset);
        if (USED(page) == 15) {
                /* page is now fully used: unlink it from the free list */
                fsp = (struct free_small_page *)new_page;
                set_page_links_prev(fsp->next, 0);
                small_page_ptr = fsp->next;
        }
        restore_flags(flags);
        return new_page;

need_new_page:
        new_page = __get_free_page(priority);
        if (!small_page_ptr) {
                if (new_page) {
                        /* fresh page: reserve it, clear the block links,
                         * and make it the head of the free list */
                        set_bit(PG_reserved, &mem_map[MAP_NR(new_page)].flags);
                        clear_page_links(new_page);
                        cli();
                        small_page_ptr = new_page;
                        goto again;
                }
                restore_flags(flags);
                return 0;
        }
        /* the list was refilled while we may have slept in
         * __get_free_page: give the fresh page back and retry */
        free_page(new_page);
        cli();
        goto again;
}

void free_small_page(unsigned long spage)
{
        struct free_small_page *ofsp, *cfsp;
        unsigned long flags;
        struct page *page;
        int offset, oldoffset;

        /* recover the owning page and the block index within it */
        offset = (spage >> SMALL_ALLOC_SHIFT) & (NR_BLOCKS - 1);
        spage -= offset << SMALL_ALLOC_SHIFT;

        page = mem_map + MAP_NR(spage);
        if (!PageReserved(page) || !USED(page)) {
                printk("Trying to free non-small page from %p\n",
                       __builtin_return_address(0));
                return;
        }
        if (IS_FREE(page, offset)) {
                printk("Trying to free free small page from %p\n",
                       __builtin_return_address(0));
                return;
        }

        save_flags_cli(flags);
        oldoffset = offsets[USED(page)];
        CLEAR_USED(page, offset);
        ofsp = PAGE_PTR(spage, oldoffset);
        cfsp = PAGE_PTR(spage, offset);

        if (oldoffset == NR_BLOCKS) {
                /* going from totally used to mostly used */
                cfsp->prev = 0;
                cfsp->next = small_page_ptr;
                set_page_links_prev(small_page_ptr, spage);
                small_page_ptr = spage;
        } else if (!USED(page)) {
                /* page is now completely free: unlink it and hand it
                 * back to the main allocator */
                set_page_links_prev(ofsp->next, ofsp->prev);
                set_page_links_next(ofsp->prev, ofsp->next);
                if (spage == small_page_ptr)
                        small_page_ptr = ofsp->next;
                clear_bit(PG_reserved, &page->flags);
                restore_flags(flags);
                free_page(spage);
                return;
        } else {
                /* copy the existing links into the newly freed block */
                *cfsp = *ofsp;
        }
        restore_flags(flags);
}
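/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller wanting a 1K buffer passes one of this era's GFP priority
 * constants, e.g. GFP_KERNEL:
 *
 *      unsigned long buf = get_small_page(GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      free_small_page(buf);
 *
 * free_small_page() recovers the owning page from the block address, so
 * the caller only needs to keep the single pointer.
 */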