// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
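/*
 * For orientation, a minimal usage sketch of the cache API implemented in
 * this file (illustrative only; "struct foo" and "foo_cache" are made-up
 * names, not part of this file):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */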

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
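/*
 * Illustrative sketch of how entry[] is used elsewhere in this file (LIFO):
 * a free pushes the object with "ac->entry[ac->avail++] = objp;" and an
 * allocation pops the most recently freed object with
 * "objp = ac->entry[--ac->avail];", so cache-warm objects are handed out first.
 */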

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000UL)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
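/*
 * Illustrative sketch of the reciprocal trick, assuming the helpers from
 * <linux/reciprocal_div.h>: the cache stores a precomputed
 * reciprocal_value(cache->size), so
 *
 *	reciprocal_divide(offset, cache->reciprocal_buffer_size)
 *
 * yields offset / cache->size using multiplies and shifts instead of a
 * hardware divide.
 */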

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
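/*
 * Worked example (illustrative, assuming 4K pages and a one-byte
 * freelist_idx_t): for gfporder = 0 and buffer_size = 256 with an on-slab
 * freelist, num = 4096 / (256 + 1) = 15 objects and
 * *left_over = 4096 - 15 * 257 = 241 bytes.
 */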

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free object.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
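/*
 * Illustrative expansion: gfp_exact_node(GFP_KERNEL) pins the allocation to
 * the requested node (__GFP_THISNODE), suppresses the failure warning
 * (__GFP_NOWARN) and clears __GFP_RECLAIM and __GFP_NOFAIL, so the caller
 * sees a fast failure on that node instead of entering reclaim.
 */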
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing to it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_sched().
	 */
	if (old_shared && force_change)
		synchronize_sched();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_