/*
 * Basic general purpose allocator for managing special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
                 int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        /* The allocation bitmap (one bit per 1 << min_alloc_order bytes)
         * is stored inline, immediately after the chunk structure. */
        int nbytes = sizeof(struct gen_pool_chunk) +
                                (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        spin_lock_init(&chunk->lock);
        chunk->start_addr = addr;
        chunk->end_addr = addr + size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add);
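/*
 * Illustrative usage sketch (not part of this file): a driver that owns
 * a dedicated SRAM region might set up a pool as below.  The names
 * sram_pool, sram_phys_base and SRAM_SIZE are hypothetical placeholders;
 * a min_alloc_order of 5 makes each bitmap bit represent 2^5 = 32 bytes.
 *
 *      struct gen_pool *sram_pool;
 *
 *      sram_pool = gen_pool_create(5, -1);
 *      if (sram_pool == NULL)
 *              return -ENOMEM;
 *      if (gen_pool_add(sram_pool, sram_phys_base, SRAM_SIZE, -1) < 0) {
 *              gen_pool_destroy(sram_pool);
 *              return -ENOMEM;
 *      }
 */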
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        /*
         * By the time a pool is destroyed there must be no other users,
         * so no locking is needed (taking pool->lock here, only to free
         * the memory it lives in, would serve no purpose).
         */
        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                /* A set bit means an allocation was never freed. */
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long addr, flags;
        int order = pool->min_alloc_order;
        int nbits, bit, start_bit, end_bit;

        if (size == 0)
                return 0;

        /* Round the request up to a whole number of bitmap bits. */
        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                /*
                 * A run of nbits must fit entirely inside the chunk, so
                 * the last valid start bit is end_bit - nbits; use that
                 * plus one as the exclusive search limit.
                 */
                end_bit -= nbits - 1;

                spin_lock_irqsave(&chunk->lock, flags);
                bit = -1;
                while (bit + 1 < end_bit) {
                        bit = find_next_zero_bit(chunk->bits, end_bit,
                                                 bit + 1);
                        if (bit >= end_bit)
                                break;

                        start_bit = bit;
                        if (nbits > 1) {
                                /* Verify the next nbits - 1 bits are
                                 * also free. */
                                bit = find_next_bit(chunk->bits, bit + nbits,
                                                    bit + 1);
                                if (bit - start_bit < nbits)
                                        continue;
                        }

                        addr = chunk->start_addr +
                                        ((unsigned long)start_bit << order);
                        while (nbits--)
                                __set_bit(start_bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        read_unlock(&pool->lock);
                        return addr;
                }
                spin_unlock_irqrestore(&chunk->lock, flags);
        }
        read_unlock(&pool->lock);
        return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long flags;
        int order = pool->min_alloc_order;
        int bit, nbits;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        spin_lock_irqsave(&chunk->lock, flags);
                        bit = (addr - chunk->start_addr) >> order;
                        while (nbits--)
                                __clear_bit(bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        break;
                }
        }
        /* nbits underflows to -1 once the region has been cleared;
         * anything else means addr was not inside any chunk. */
        BUG_ON(nbits > 0);
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
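/*
 * Illustrative usage sketch (sram_pool is the hypothetical pool from the
 * earlier sketch): allocating and releasing a 256-byte buffer.  Note that
 * gen_pool_alloc() returns 0 on failure, so a chunk starting at address 0
 * cannot be distinguished from an allocation failure.
 *
 *      unsigned long buf;
 *
 *      buf = gen_pool_alloc(sram_pool, 256);
 *      if (buf == 0)
 *              return -ENOMEM;
 *
 *      (use the 256 bytes at buf, then release them)
 *
 *      gen_pool_free(sram_pool, buf, 256);
 */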