/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
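
/*
 * Illustrative sketch (the 64-byte block size below is hypothetical, not
 * taken from any driver): for a pool of 64-byte blocks carved out of a
 * single PAGE_SIZE allocation, the first bytes of each free block hold
 * the offset of the next free block, so a freshly initialised page forms
 * a chain roughly like
 *
 *	offset 0 -> 64 -> 128 -> ... -> pool->allocation  (end of chain)
 *
 * struct dma_page's 'offset' field caches the head of this chain;
 * dma_pool_alloc() pops the head and dma_pool_free() pushes the returned
 * block back onto it.
 */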

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
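
/*
 * Example (illustrative only; "mydev" and the sizes are hypothetical, not
 * part of this file): a driver needing many small, 8-byte-aligned DMA
 * descriptors might create a pool from its probe routine like so:
 *
 *	struct dma_pool *desc_pool;
 *
 *	desc_pool = dma_pool_create("mydev-desc", &mydev->dev, 32, 8, 0);
 *	if (!desc_pool)
 *		return -ENOMEM;
 *
 * Passing 0 as @boundary only requires blocks to stay within one
 * 'allocation'-sized chunk, which they always do.
 */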

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
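
/*
 * Example (illustrative sketch; "desc_pool" is the hypothetical pool from
 * the dma_pool_create() example above): a driver's remove path destroys
 * the pool only after every block has been returned with dma_pool_free():
 *
 *	dma_pool_destroy(desc_pool);
 */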

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
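
/*
 * Example (illustrative sketch; "desc_pool", "desc" and "desc_dma" are
 * hypothetical driver-side names): allocate one block, use the kernel
 * virtual address from the CPU and hand the dma_addr_t to the device:
 *
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 */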

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
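
/*
 * Example (illustrative sketch, continuing the hypothetical names used
 * above): once neither CPU nor device will touch the block again, return
 * it with the same virtual/dma address pair that dma_pool_alloc() gave out:
 *
 *	dma_pool_free(desc_pool, desc, desc_dma);
 */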

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
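
/*
 * Example (illustrative sketch; names are hypothetical): with the managed
 * variant the driver needs no explicit dma_pool_destroy() in its remove
 * path, since devres tears the pool down on detach:
 *
 *	pool = dmam_pool_create("mydev-buf", &pdev->dev, 64, 64, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */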

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);