/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in:
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
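
/*
 * Illustrative usage sketch (hypothetical "struct foo"; not part of this
 * file's logic): one cache per object type, as described above.
 *
 *	struct kmem_cache *foo_cache =
 *		kmem_cache_create("foo", sizeof(struct foo), 0,
 *				  SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	... use f, then hand it back still fully initialized ...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */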

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour: SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
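
/*
 * Illustrative sketch (addresses hypothetical): slab objects are at least
 * word-aligned, so bit 0 of an object pointer is normally clear and can
 * carry the PFMEMALLOC tag:
 *
 *	void *obj = (void *)0x1000;
 *	set_obj_pfmemalloc(&obj);	now 0x1001, is_obj_pfmemalloc() true
 *	clear_obj_pfmemalloc(&obj);	back to 0x1000
 */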

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps mean a lower probability of unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab, or
 * the order has been overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide: (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
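
/*
 * Worked example (numbers hypothetical): for a cache with size == 256 and
 * reciprocal_buffer_size == reciprocal_value(256), an object at
 * page->s_mem + 768 gives offset == 768, and
 * reciprocal_divide(768, cache->reciprocal_buffer_size) == 768 / 256 == 3,
 * with no divide instruction on this hot path.
 */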

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
		unsigned long flags, size_t *left_over, unsigned int *num)
{
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all page aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		*num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		*num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}
}
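
/*
 * Worked example (numbers hypothetical): with 4096-byte pages, order 0,
 * 256-byte objects and an on-slab freelist of one freelist_idx_t (here
 * assumed 1 byte) per object:
 *
 *	num       = 4096 / (256 + 1) = 15 objects
 *	left_over = 4096 - 15 * 257  = 241 bytes
 */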

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);
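
/*
 * Usage sketch: booting with the "noaliencache" kernel parameter runs
 * noaliencache_setup() above and forces use_alien_caches to 0.
 */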

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
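
/*
 * Usage sketch (value hypothetical): booting with "slab_max_order=2"
 * allows slabs of up to order 2 (16KB with 4KB pages), subject to the
 * MAX_ORDER - 1 clamp above.
 */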

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}
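
/*
 * Illustrative walk (node set hypothetical): with nodes 0 and 2 online,
 * successive next_reap_node() calls move slab_reap_node 0 -> 2 -> 0 -> ...,
 * so each reap pass drains the alien caches of one remote node at a time.
 */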

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be set up
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}
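
/*
 * Illustrative sizing (numbers hypothetical): entries == 120 on a 64-bit
 * machine gives memsize == sizeof(struct array_cache) + 120 * 8 bytes,
 * i.e. the header and its flexible entry[] array in a single allocation.
 */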

static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = get_node(cachep, numa_mem_id());
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static noinline void *__ac_put_obj(struct kmem_cache *cachep,
			struct array_cache *ac, void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
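
/*
 * Illustrative call (numbers hypothetical): with from->avail == 10,
 * max == 4 and to->limit - to->avail == 6, nr == 4, so the four
 * most-recently-freed (cache-warm) entries on top of 'from' move to
 * 'to' and transfer_objects() returns 4.
 */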

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		/* only initialize if the allocation succeeded */
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac_put_obj(cachep, ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not direct reclaim
 * or warn about failures. kswapd may still wake to reclaim in the background.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_DIRECT_RECLAIM;
}
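
/*
 * Illustrative expansion (flags hypothetical): for GFP_KERNEL this yields
 * (GFP_KERNEL | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_DIRECT_RECLAIM,
 * so the allocation stays on the target node and never direct-reclaims,
 * though kswapd may still be woken.
 */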
#endif

/*
 * Allocates and initializes a kmem_cache_node for each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node.  When hotplugging memory or a cpu, existing
 * kmem_cache_nodes are not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const size_t memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated it.
		 */
		n = get_node(cachep, node);
		if (!n) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&n->list_lock);
		n->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&n->list_lock);
	}
	return 0;
}
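
/*
 * Worked example (numbers hypothetical): a node with 4 CPUs and a cache
 * with batchcount == 6 and num == 15 objects per slab gets
 * free_limit = (1 + 4) * 6 + 15 = 45 free objects before the node is
 * considered worth shrinking.
 */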

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}
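
/*
 * e.g. (numbers hypothetical) free_objects == 31 with num == 15 per slab
 * rounds up to (31 + 15 - 1) / 15 == 3 slabs, enough to cover every
 * currently free object.
 */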

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);