/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);

static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

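/* Reset the slab lists, shared/alien pointers, lock and counters of one kmem_cache_node. */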
static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
	parent->num_slabs = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

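/* Map an object pointer back to the kmem_cache that owns it, via the slab's head page. */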
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

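/* Parse the "slab_max_order=" boot option and clamp it to the range [0, MAX_ORDER - 1]. */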
static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

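/* Fill in the bookkeeping fields of a freshly allocated array_cache. */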
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

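/* Free @objp straight back to the node owning @page, bypassing the per-cpu array caches. */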
static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

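/* Without NUMA the alien-cache machinery compiles away to no-ops and NULL stubs. */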
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

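/* Allocate and initialise a single alien_cache on @node, sized for @entries object pointers. */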
static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

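/* Flush the alien caches of every online node back to their home nodes. */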
static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

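/*
 * Free an object that belongs to a remote node: queue it in that node's
 * alien cache, or hand it straight back to the remote node's lists when
 * no alien cache is available.
 */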
static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

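/*
 * Make sure @cachep has a kmem_cache_node for @node, allocating and
 * installing one if necessary, and refresh its free_limit for the
 * current number of cpus on the node.
 */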
static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

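/*
 * Set up the kmem_cache_node, shared array and alien caches for @node.
 * With @force_change, an already installed shared array is drained and
 * replaced by a freshly allocated one.
 */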
static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_sched().
	 */
	if (old_shared && force_change)
		synchronize_sched();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

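/*
 * Roll back cpuup_prepare(): flush the dead cpu's array cache and, once the
 * node has no online cpus left, release its shared and alien caches too.
 */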
static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;