/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
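
/*
 * Illustrative sketch only (not part of this file's logic): the typical
 * client-side usage of the cache API described above.  The cache name
 * "foo" and struct foo are made-up placeholders for the example.
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */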

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_NODE (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->node[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
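
/*
 * Sketch of the assumed setup (done once on the cache-creation path,
 * e.g. in __kmem_cache_create()) that lets obj_to_index() above avoid a
 * runtime division:
 *
 *	cachep->reciprocal_buffer_size = reciprocal_value(cachep->size);
 *
 * reciprocal_value() comes from <linux/reciprocal_div.h>, included above.
 */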

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		int q)
{
	struct array_cache **alc;
	struct kmem_cache_node *n;
	int r;

	n = cachep->node[q];
	if (!n)
		return;

	lockdep_set_class(&n->list_lock, l3_key);
	alc = n->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache,
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&alc[r]->lock, alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;

	for_each_online_node(node)
		slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache_node *n;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		n = cache->node[q];
		if (!n || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{
	if (!cachep->node[q])
		return;

	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_node(node)
		on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t freelist_size;

	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	freelist_size = slab_size - nr_objs * buffer_size;
	if (freelist_size < ALIGN(nr_objs * idx_size, align))
		nr_objs--;

	return nr_objs;
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = cachep->node[numa_mem_id()];
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *n = cachep->node[node];

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct array_cache *ac = n->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int nodeid = page_to_nid(virt_to_page(objp));
	struct kmem_cache_node *n;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(nodeid == node))
		return 0;

	n = cachep->node[node];
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		ac_put_obj(cachep, alien, objp);
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->node[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->node[nodeid])->list_lock);
	}
	return 1;
}
#endif

/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const int memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->node[node]) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&cachep->node[node]->list_lock);
		cachep->node[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->node[node]->list_lock);
	}
	return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		n = cachep->node[node];

		if (!n)
			goto free_array_cache;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);