/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
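
/*
 * Illustrative sketch (not part of the original file): with the debug
 * options above enabled, an idle 8-byte element parked in the pool is
 * filled with POISON_FREE bytes terminated by POISON_END (values from
 * include/linux/poison.h), so any write to a "free" element is reported
 * the next time it is handed out:
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 */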

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_free(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_alloc(pool->pool_data, element, flags);
	if (pool->alloc == mempool_kmalloc)
		kasan_krealloc(element, (size_t)pool->pool_data, flags);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
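
/*
 * Usage sketch (illustrative, not part of the original file): creating and
 * destroying a pool backed by a caller-owned kmem_cache.  "foo" and
 * foo_cache are hypothetical names used only for this example.
 *
 *	struct kmem_cache *foo_cache;
 *	mempool_t *foo_pool;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	foo_pool = mempool_create(16, mempool_alloc_slab,
 *				  mempool_free_slab, foo_cache);
 *	if (!foo_pool)
 *		goto fail;
 *	...
 *	mempool_destroy(foo_pool);
 *	kmem_cache_destroy(foo_cache);
 */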

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;
	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
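
/*
 * Usage sketch (illustrative, not part of the original file): pinning the
 * pool's control structures to a device-local NUMA node; dev is a
 * hypothetical struct device.  Plain mempool_create() passes NUMA_NO_NODE.
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
 *				   foo_cache, GFP_KERNEL, dev_to_node(dev));
 */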

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
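
/*
 * Usage sketch (illustrative, not part of the original file): growing the
 * reserve when a hypothetical queue depth is raised.  A failed grow
 * returns -ENOMEM and leaves the existing reserve intact.
 *
 *	if (mempool_resize(foo_pool, new_depth))
 *		pr_warn("foo: could not grow reserve to %d\n", new_depth);
 */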

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	/* If oom killed, memory reserves are essential to prevent livelock */
	VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
	/* No element size to zero on allocation */
	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);

	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:
	if (likely(pool->curr_nr)) {
		/*
		 * Don't allocate from emergency reserves if there are
		 * elements available.  This check is racy, but it will
		 * be rechecked each loop.
		 */
		gfp_temp |= __GFP_NOMEMALLOC;
	}

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}
	gfp_temp = gfp_mask;

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
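
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * allocate/use/free cycle on an I/O path.  GFP_NOIO may sleep but avoids
 * recursing into the block layer; per the comment above, the call cannot
 * fail in process context.  "foo_pool" is the hypothetical pool from the
 * mempool_create() example.
 *
 *	struct foo *f = mempool_alloc(foo_pool, GFP_NOIO);
 *
 *	... use f, e.g. as part of servicing a request ...
 *
 *	mempool_free(f, foo_pool);
 */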

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
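
/*
 * Note (illustrative, not part of the original file): include/linux/mempool.h
 * also provides mempool_create_slab_pool(), a convenience wrapper that
 * passes the two helpers above, e.g.:
 *
 *	pool = mempool_create_slab_pool(16, foo_cache);
 */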

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
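
/*
 * Usage sketch (illustrative, not part of the original file): a pool of
 * fixed-size kmalloc'd buffers.  The object size travels in pool_data,
 * cast to and from a pointer; mempool_create_kmalloc_pool() in mempool.h
 * wraps this pair.
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)512);
 */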

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
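
/*
 * Usage sketch (illustrative, not part of the original file): a reserve of
 * order-1 page pairs.  Elements are struct page pointers, not kernel
 * virtual addresses; mempool_create_page_pool() in mempool.h wraps this
 * pair.
 *
 *	mempool_t *pool = mempool_create(4, mempool_alloc_pages,
 *					 mempool_free_pages, (void *)1L);
 *	struct page *page = mempool_alloc(pool, GFP_NOIO);
 *	...
 *	mempool_free(page, pool);
 */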