/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
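
/*
 * Illustrative usage of this allocator from a client subsystem (a sketch,
 * not part of this file; "foo" and struct foo are hypothetical):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */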

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
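
/*
 * Illustrative sketch of how the on-slab bufctl array is used (this is not a
 * function in this file): the kmem_bufctl_t array lives directly behind the
 * struct slab descriptor, one entry per object, and slabp->free holds the
 * index of the first free object.  Walking the free list is roughly:
 *
 *	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
 *	kmem_bufctl_t i;
 *
 *	for (i = slabp->free; i != BUFCTL_END; i = bufctl[i])
 *		;	(object number 'i' is currently free)
 */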

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};
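
/*
 * Sketch of the lookup discipline described above (caller-side pseudo-code;
 * 'ptr' and the re-check are hypothetical, supplied by the using subsystem):
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(ptr);
 *	spin_lock(&obj->lock);
 *	if (still_the_object_we_expected(obj))
 *		... use obj ...
 *	spin_unlock(&obj->lock);
 *	rcu_read_unlock();
 *
 * This is only safe because SLAB_DESTROY_BY_RCU defers kmem_freepages() by a
 * grace period, so the page cannot go back to the page allocator while the
 * read side runs; the object may still be recycled within the same cache,
 * hence the re-check after taking the lock.
 */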

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	union {
		struct {
			struct list_head list;
			unsigned long colouroff;
			void *s_mem;		/* including colour offset */
			unsigned int inuse;	/* num of objs active in slab */
			kmem_bufctl_t free;
			unsigned short nodeid;
		};
		struct slab_rcu __slab_cover_slab_rcu;
	};
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the low bit set (SLAB_OBJ_PFMEMALLOC).
			 */
};
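
/*
 * Simplified view of the fast path this array enables (illustrative only;
 * the real code lives in ____cache_alloc() further down):
 *
 *	ac = cpu_cache_get(cachep);
 *	if (ac->avail)
 *		objp = ac->entry[--ac->avail];	(LIFO pop, cache-warm object)
 *	else
 *		objp = cache_alloc_refill(...);	(slow path, refills entry[])
 *
 * Frees push back onto entry[]; once avail reaches limit, batchcount objects
 * are flushed to the per-node lists in one go.
 */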

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
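
/*
 * Worked example of the tagging above (illustrative): objects are at least
 * word aligned, so bit 0 of a valid object pointer is always clear and can
 * carry the "came from a pfmemalloc slab" marker:
 *
 *	objp = 0x...f08
 *	set_obj_pfmemalloc(&objp);	-> 0x...f09, is_obj_pfmemalloc() true
 *	clear_obj_pfmemalloc(&objp);	-> 0x...f08 again, safe to dereference
 */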

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_L3 (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_L3 kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_list3_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	VM_BUG_ON(!PageSlab(page));
	return page->slab_page;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
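
/*
 * Example (illustrative): reciprocal_buffer_size is initialised from
 * reciprocal_value(cachep->size) when the cache is set up.  For size = 256
 * and an object at s_mem + 1024, reciprocal_divide(1024, reciprocal_value(256))
 * yields 4, the same result as 1024 / 256 but without a divide instruction
 * in the hot path.
 */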

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		int q)
{
	struct array_cache **alc;
	struct kmem_cache_node *l3;
	int r;

	l3 = cachep->nodelists[q];
	if (!l3)
		return;

	lockdep_set_class(&l3->list_lock, l3_key);
	alc = l3->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache,
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&alc[r]->lock, alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;

	for_each_online_node(node)
		slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
		struct kmem_cache_node *l3;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		l3 = cache->nodelists[q];
		if (!l3 || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{

	if (!cachep->nodelists[q])
		return;

	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_node(node)
		on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	int i;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
#endif
	if (!size)
		return ZERO_SIZE_PTR;

	i = kmalloc_index(size);

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return kmalloc_dma_caches[i];
#endif
	return kmalloc_caches[i];
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
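
/*
 * Worked example (illustrative, assuming 4 KB pages, 64-bit pointers and an
 * on-slab descriptor of 48 bytes): for buffer_size = 512, align = 64 and
 * gfporder = 0, the first guess is
 *	nr_objs = (4096 - 48) / (512 + sizeof(kmem_bufctl_t)) = 4048 / 516 = 7
 * slab_mgmt_size(7, 64) = ALIGN(48 + 7 * 4, 64) = 128, and 128 + 7 * 512 =
 * 3712 <= 4096, so seven objects fit and left_over ends up as 384 bytes
 * (later available for cache colouring).
 */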

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
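
/*
 * E.g. booting with "slab_max_order=2" allows slabs of up to four contiguous
 * pages; negative values are clamped to 0 and values above MAX_ORDER - 1 are
 * capped, as the parsing above does.
 */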

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

static inline bool is_slab_pfmemalloc(struct slab *slabp)
{
	struct page *page = virt_to_page(slabp->s_mem);

	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *l3 = cachep->nodelists[numa_mem_id()];
	struct slab *slabp;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&l3->list_lock, flags);
	list_for_each_entry(slabp, &l3->slabs_full, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	list_for_each_entry(slabp, &l3->slabs_partial, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	list_for_each_entry(slabp, &l3->slabs_free, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&l3->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *l3;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		l3 = cachep->nodelists[numa_mem_id()];
		if (!list_empty(&l3->slabs_free) && force_refill) {
			struct slab *slabp = virt_to_slab(objp);
			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *l3)
{
	int node = __this_cpu_read(slab_reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_cache_node *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		ac_put_obj(cachep, alien, objp);
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif

/*
 * Allocates and initializes nodelists for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodelists are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_nodelists_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *l3;
	const int memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the size64 kmemlist for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->nodelists[node]) {
			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!l3)
				return -ENOMEM;
			kmem_list3_init(l3);
			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

			/*
			 * The l3s don't come and go as CPUs come and
			 * go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->nodelists[node] = l3;
		}

		spin_lock_irq(&cachep->nodelists[node]->list_lock);
		cachep->nodelists[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
	}
	return 0;
}

static void __cpuinit cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *l3 = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		l3 = cachep->nodelists[node];

		if (!l3)
			goto free_array_cache;

		spin_lock_irq(&l3->list_lock);

		/* Free limit for this kmem_list3 */
		l3->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&l3->list_lock);
			goto free_array_cache;
		}

		shared = l3->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			l3->shared = NULL;
		}

		alien = l3->alien;
		l3->alien = NULL;

		spin_unlock_irq(&l3->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;
		drain_freelist(cachep, l3, l3->free_objects);
	}
}

static int __cpuinit cpuup_prepare(long cpu)
{