/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in:
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
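
/*
 * Illustrative sketch (not part of the allocator): typical use of the cache
 * API implemented in this file. "foo_cache", struct foo and foo_ctor() are
 * hypothetical names and error handling is omitted. The constructor runs
 * only when a new slab is populated, objects must be handed back to
 * kmem_cache_free() in an initialized state, and kmem_cache_destroy() must
 * not race with allocations:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	objp = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, objp);
 *	kmem_cache_destroy(foo_cache);
 */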

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
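
/*
 * For example, with 4K pages PAGE_SIZE >> BITS_PER_BYTE is 16, so whenever
 * the smallest slab object is at least 16 bytes a single page can never hold
 * more than 256 objects and a one-byte freelist index is sufficient;
 * SLAB_OBJ_MAX_NUM then caps a slab at 255 objects.
 */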

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
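
/*
 * Illustrative sketch (assumed variable names): the hot paths below treat
 * entry[] as a LIFO stack with local interrupts disabled. An allocation pops
 * the most recently freed, cache-warm object and a free pushes it back; once
 * avail reaches limit, a batchcount-sized chunk is spilled to the shared or
 * node lists:
 *
 *	objp = ac->entry[--ac->avail];
 *	ac->entry[ac->avail++] = objp;
 */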

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means a lower probability of unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
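
/*
 * Worked example of the layout above (illustrative figures, assuming a
 * 64-bit build and default alignment): SLAB_RED_ZONE moves obj_offset() up
 * by 8 bytes and grows ->size by 16 for the two red-zone words;
 * SLAB_STORE_USER appends one more word for the last caller, so a 64-byte
 * object ends up occupying 64 + 16 + 8 = 88 bytes plus any remaining
 * alignment padding.
 */
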
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
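
/*
 * Illustrative example (assumed numbers): for a cache of 256-byte objects,
 * the object at page->s_mem + 768 gives offset == 768 and an index of 3.
 * reciprocal_buffer_size is precomputed from ->size when the cache is set
 * up, so the division above costs a multiply and a shift rather than a
 * hardware divide.
 */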

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
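
/*
 * Worked example (illustrative numbers): for an order-0 slab (4096 bytes),
 * 256-byte objects and an on-slab freelist with a one-byte freelist_idx_t,
 * num = 4096 / 257 = 15 and *left_over = 4096 - 15 * 257 = 241; the spare
 * bytes are what the colouring code later hands out as slab colour offsets.
 */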

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
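 *
 * For example, gfp_exact_node(GFP_KERNEL) keeps __GFP_IO and __GFP_FS, adds
 * __GFP_THISNODE | __GFP_NOWARN and clears the reclaim and __GFP_NOFAIL bits,
 * so the allocation either succeeds on the requested node without reclaiming
 * or fails quickly and silently.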
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

/*
 * Allocates and initializes a kmem_cache_node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad: