/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of the memory
 * allocated for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct {
		struct list_head list;
		void *s_mem;		/* including colour offset */
		unsigned int active;	/* num of objs active in slab */
	};
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the low bit set to SLAB_OBJ_PFMEMALLOC
			 */
};
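
/*
 * Illustrative note (not from the original source): entry[] acts as a small
 * per-cpu LIFO stack of object pointers.  The fast paths below boil down to
 *
 *	alloc:	objp = ac->entry[--ac->avail];
 *	free:	ac->entry[ac->avail++] = objp;
 *
 * and only when avail hits 0 or limit does the slow path take the per-node
 * list_lock to refill or flush the array.
 */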

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
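
/*
 * Worked example (illustrative only): slab objects are at least word
 * aligned, so bit 0 of an object pointer is normally clear and can carry
 * the pfmemalloc flag.  An object at, say, 0xffff880012345678 coming from
 * a pfmemalloc slab is stored in the array as 0xffff880012345679;
 * clear_obj_pfmemalloc() masks the bit off before the pointer is used.
 */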

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_NODE (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->node[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
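
/*
 * Illustrative example (assumed values, not part of the original source):
 * for a 24-byte object with SLAB_RED_ZONE and SLAB_STORE_USER on a 64-bit
 * machine, obj_offset is 8 and the layout is roughly
 *
 *	[redzone1 8B][object 24B][padding][redzone2 8B][caller address 8B]
 *
 * dbg_redzone1()/dbg_redzone2() return the two marker words and
 * dbg_userword() the stored caller, all computed from cachep->size.
 */
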
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	VM_BUG_ON(!PageSlab(page));
	return page->slab_page;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
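
/*
 * Worked example (illustrative): for a cache with size = 256 the setup code
 * stores reciprocal_value(256) in reciprocal_buffer_size, so an object at
 * offset 0x300 from s_mem maps to index reciprocal_divide(0x300, ...) == 3,
 * the same result as 0x300 / 256 but without a hardware divide.
 */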

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then comes back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		int q)
{
	struct array_cache **alc;
	struct kmem_cache_node *n;
	int r;

	n = cachep->node[q];
	if (!n)
		return;

	lockdep_set_class(&n->list_lock, l3_key);
	alc = n->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache,
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&alc[r]->lock, alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;

	for_each_online_node(node)
		slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache_node *n;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		n = cache->node[q];
		if (!n || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{
	if (!cachep->node[q])
		return;

	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_node(node)
		on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(unsigned int), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(unsigned int));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
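
/*
 * Worked example (illustrative, assuming 4K pages): for a 256-byte object
 * with on-slab management and align == sizeof(void *), an order-0 slab has
 * 4096 bytes; the first guess (4096 - sizeof(struct slab)) / (256 + 4)
 * gives 15 objects, and slab_mgmt_size(15, align) plus 15 * 256 still fits,
 * so *num = 15 and *left_over holds the remaining bytes used for colouring.
 */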

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
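
/*
 * Usage note (illustrative): booting with "slab_max_order=2" allows slabs of
 * up to order 2 (16K with 4K pages); negative values are clamped to 0 and
 * anything above MAX_ORDER - 1 is capped by the parser above.
 */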

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
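/*
 * Illustrative example: __round_jiffies_relative(HZ, cpu) rounds the 1
 * second delay to a whole-second boundary and skews it based on the cpu
 * number, so different CPUs fire their cache_reap() work slightly apart
 * instead of all reap timers expiring on the same tick.
 */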
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

static inline bool is_slab_pfmemalloc(struct slab *slabp)
{
	struct page *page = virt_to_page(slabp->s_mem);

	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
	struct slab *slabp;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(slabp, &n->slabs_full, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	list_for_each_entry(slabp, &n->slabs_partial, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	list_for_each_entry(slabp, &n->slabs_free, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = cachep->node[numa_mem_id()];
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct slab *slabp = virt_to_slab(objp);
			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct slab *slabp = virt_to_slab(objp);
		struct page *page = virt_to_head_page(slabp->s_mem);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
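
/*
 * Worked example (illustrative): if "from" holds 30 objects, "to" has
 * limit 120 with 100 already cached and max is 50, then
 * nr = min3(30, 50, 120 - 100) = 20: the 20 most recently freed pointers
 * are memcpy'd across, from->avail drops to 10 and to->avail becomes 120.
 */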

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *n = cachep->node[node];

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct array_cache *ac = n->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int nodeid = page_to_nid(virt_to_page(objp));
	struct kmem_cache_node *n;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(nodeid == node))
		return 0;

	n = cachep->node[node];
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		ac_put_obj(cachep, alien, objp);
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->node[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->node[nodeid])->list_lock);
	}
	return 1;
}
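
/*
 * Illustrative summary: kfree() of an object that was allocated on node 1
 * but is being freed by a CPU on node 0 lands here; the pointer is parked
 * in the node-0 alien array for node 1 and only pushed back to node 1's
 * lists (via __drain_alien_cache()) when that array fills up or the
 * periodic reaper runs.
 */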
#endif

/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const int memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the size64 kmemlist for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->node[node]) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

			/*
			 * The l3s don't come and go as CPUs come and
			 * go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&cachep->node[node]->list_lock);
		cachep->node[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->node[node]->list_lock);
	}
	return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		n = cachep->node[node];

		if (!n)
			goto free_array_cache;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_array_cache;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			n->shared = NULL