// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
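
/*
 * For illustration (numbers are hypothetical): with size = 64 and
 * allocation = PAGE_SIZE = 4096, each page is carved into 64 blocks.  A
 * free block's first bytes hold its 'struct dma_block' header, so the
 * free list needs no storage of its own; an allocated block's bytes
 * belong entirely to the caller until dma_pool_free() returns them.
 */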

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#ifdef CONFIG_SLUB_DEBUG_ON
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {		/* in-band header of a free block */
	struct dma_block *next_block;	/* next free block in the pool */
	dma_addr_t dma;			/* device address of this block */
};

struct dma_pool {		/* the pool */
	struct list_head page_list;	/* all pages backing this pool */
	spinlock_t lock;		/* protects free list and counters */
	struct dma_block *next_block;	/* head of the free-block list */
	size_t nr_blocks;		/* total blocks carved from all pages */
	size_t nr_active;		/* blocks currently handed out */
	size_t nr_pages;		/* pages on page_list */
	struct device *dev;		/* device doing the DMA */
	unsigned int size;		/* block size, after align/min fixups */
	unsigned int allocation;	/* bytes per dma_alloc_coherent() call */
	unsigned int boundary;		/* power-of-two blocks must not cross */
	char name[32];			/* pool name, for diagnostics */
	struct list_head pools;		/* link on dev->dma_pools */
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned int size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info: blocks in use, total blocks, block size, pages */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the whole block, including the leading
		 * sizeof(struct dma_block) bytes, which hold the free-list
		 * header and are never poisoned
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}


/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
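
/*
 * Creation sketch (hypothetical device and sizes, not part of this file):
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("td", &pdev->dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * This yields 64-byte blocks aligned to 16 bytes that never cross a
 * 4 KiB boundary.
 */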

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}
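
/*
 * Worked example of the boundary logic above (hypothetical numbers):
 * size = 700, boundary = 1024, allocation = 4096.  Blocks are placed at
 * offsets 0, 1024, 2048 and 3072; the loop skips the tail of each
 * 1024-byte window (offsets 700, 1724, ...) because a 700-byte block
 * starting there would straddle a boundary, so each page yields 4 blocks.
 */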

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: not in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
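
/*
 * Teardown sketch: every block must have been returned first, e.g.
 *
 *	dma_pool_free(pool, vaddr, dma);	(for each live block)
 *	dma_pool_destroy(pool);
 *
 * If nr_active is nonzero the pool is reported busy and the backing
 * pages are deliberately leaked instead of being freed while possibly
 * still mapped for DMA.
 */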

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);
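
/*
 * Allocation sketch (hypothetical names): the returned pointer is the
 * CPU's view of the block, 'handle' is the device's:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_pool_alloc(pool, GFP_ATOMIC, &handle);
 *
 *	if (cpu) { fill the block via 'cpu', hand 'handle' to hardware }
 *
 * Pass __GFP_ZERO, or use the dma_pool_zalloc() wrapper, to get a
 * zeroed block.
 */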

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
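
/*
 * Matching release for the sketch above, once the hardware is done with
 * the block; the same (cpu, handle) pair must be passed back:
 *
 *	dma_pool_free(pool, cpu, handle);
 */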

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: boundary that returned blocks won't cross, or zero;
 *	passed through to dma_pool_create() as @boundary
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
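
/*
 * Managed-usage sketch (hypothetical probe function): devres destroys
 * the pool on driver detach, so no explicit cleanup is needed:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo", &pdev->dev, 64, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 */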

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);