/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab, as linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
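
/*
 * Free-list walk (for reference): slabp->free holds the index of the
 * first free object, and the kmem_bufctl_t array that follows struct
 * slab holds, at entry i, the index of the next free object after
 * object i; the chain is terminated by BUFCTL_END.
 */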

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};
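
/*
 * Access pattern (for reference): an allocation pops the most recently
 * freed object with ac->entry[--ac->avail]; a free pushes it back with
 * ac->entry[ac->avail++].  The LIFO order hands out cache-warm objects
 * first.
 */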

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <=x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}
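
/*
 * Example (sketch): with the usual kmalloc size table beginning
 * 32, 64, 96, 128, ..., index_of(100) folds at compile time into the
 * slot of the 128-byte cache.  The exact indices depend on the CONFIG
 * options that shape linux/kmalloc_sizes.h.
 */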

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
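
/*
 * Worked example (sketch): on a 64-bit build (BYTES_PER_WORD == 8) with
 * SLAB_RED_ZONE and SLAB_STORE_USER set, a 16-byte object is laid out
 * as 8 bytes of redzone, the object, 8 bytes of redzone, then the
 * 8-byte caller address, so buffer_size is at least 40 bytes before
 * any alignment padding.
 */
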
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
 * order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->buffer_size)
 *   Using the fact that buffer_size is a constant for a particular cache,
 *   we can replace (offset / cache->buffer_size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
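
/*
 * Worked example: for a cache with buffer_size == 256,
 * reciprocal_buffer_size == reciprocal_value(256), so an object at
 * offset 768 from s_mem yields reciprocal_divide(768, ...) == 3,
 * i.e. the fourth object in the slab, with no integer division.
 */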

/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node goes down and
 * then comes back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)

{
	int q;
	struct cache_sizes *s = malloc_sizes;

	while (s->cs_size != ULONG_MAX) {
		for_each_node(q) {
			struct array_cache **alc;
			int r;
			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
			if (!l3 || OFF_SLAB(s->cs_cachep))
				continue;
			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
			alc = l3->alien;
			/*
			 * FIXME: This check for BAD_ALIEN_MAGIC
			 * should go away when common slab code is taught to
			 * work even without alien caches.
			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
			 * for alloc_alien_cache,
			 */
			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
				continue;
			for_each_node(r) {
				if (alc[r])
					lockdep_set_class(&alc[r]->lock,
					     &on_slab_alc_key);
			}
		}
		s++;
	}
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/*
 * 1. Guard access to the cache-chain.
 * 2. Protect sanity of cpu_online_map against cpu hotplug events
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct delayed_work, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
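
/*
 * Worked example (sketch): for an order-0 slab (4096 bytes) holding
 * 256-byte objects on-slab, and assuming sizeof(struct slab) == 28,
 * sizeof(kmem_bufctl_t) == 4 and align == 4, the initial guess is
 * (4096 - 28) / (256 + 4) = 15 objects; the management area is then
 * ALIGN(28 + 15*4, 4) = 88 bytes, and 88 + 15*256 = 3928 <= 4096 fits,
 * so *num = 15 and *left_over = 4096 - 3840 - 88 = 168.
 */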

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);
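
/*
 * Usage note: booting with "noaliencache" on the kernel command line
 * clears use_alien_caches, so alien arrays are never allocated and
 * remote frees go straight to the owning node's lists.
 */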

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	/*
	 * Also drain per cpu pages on remote zones
	 */
	if (node != numa_node_id())
		drain_node_pages(node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif
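
/*
 * For reference: each cache_reap() pass advances reap_node round-robin
 * through node_online_map, so successive passes drain the alien caches
 * (and remote per-cpu pages) of a different node each time.
 */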

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}
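
/*
 * Note that the memcpy() above moves the 'nr' most recently added
 * entries from the top of 'from' to the top of 'to', so the warmest
 * objects are the ones transferred.
 */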

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_node_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif
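
/*
 * For reference: cache_free_alien() stages an object freed on the wrong
 * node in the alien array belonging to the object's home node; once that
 * array is full, __drain_alien_cache() flushes it back to the home
 * node's lists in one locked batch.
 */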

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	int memsize = sizeof(struct kmem_list3);

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&cache_chain_mutex);
		/*
		 * We need to do this right in the beginning since
		 * alloc_arraycache's are going to use this list.
		 * kmalloc_node allows us to add the slab to the right
		 * kmem_list3 and not this cpu's kmem_list3
		 */

		list_for_each_entry(cachep, &cache_chain, next) {
			/*
			 * Set up the size64 kmemlist for cpu before we can
			 * begin anything. Make sure some other cpu on this
			 * node has not already allocated this
			 */
			if (!cachep->nodelists[node]) {
				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
				if (!l3)
					goto bad;
				kmem_list3_init(l3);
				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

				/*
				 * The l3s don't come and go as CPUs come and
				 * go.  cache_chain_mutex is sufficient
				 * protection here.
				 */
				cachep->nodelists[node] = l3;
			}

			spin_lock_irq(&cachep->nodelists[node]->list_lock);
			cachep->nodelists[node]->free_limit =
				(1 + nr_cpus_node(node)) *
				cachep->batchcount + cachep->num;
			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
		}

		/*
		 * Now we can go ahead with allocating the shared arrays and
		 * array caches
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared = NULL;
			struct array_cache **alien = NULL;

			nc = alloc_arraycache(node, cachep->limit,
						cachep->batchcount);
			if (!nc)
				goto bad;
			if (cachep->shared) {
				shared = alloc_arraycache(node,
					cachep->shared * cachep->batchcount,
					0xbaadf00d);
				if (!shared)
					goto bad;
			}
			if (use_alien_caches) {
				alien = alloc_alien_cache(node, cachep->limit);
				if (!alien)
					goto bad;
			}
			cachep->array[cpu] = nc;
			l3 = cachep->nodelists[node];
			BUG_ON(!l3);

			spin_lock_irq(&l3->list_lock);
			if (!l3->shared) {
				/*
				 * We are serialised from CPU_DEAD or
				 * CPU_UP_CANCELLED by the cpucontrol lock
				 */
				l3->shared = shared;
				shared = NULL;
			}
#ifdef CONFIG_NUMA
			if (!l3->alien) {
				l3->alien = alien;
				alien = NULL;
			}
#endif
			spin_unlock_irq(&l3->list_lock);
			kfree(shared);
			free_alien_cache(alien);
		}
		break;
	case CPU_ONLINE:
		mutex_unlock(&cache_chain_mutex);
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
		mutex_lock(&cache_chain_mutex);
		break;
	case CPU_DOWN_FAILED:
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_DEAD:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall thru */
#endif
	case CPU_UP_CANCELED:
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared;
			struct array_cache **alien;
			cpumask_t mask;

			mask = node_to_cpumask(node);
			/* cpu is dead; no one can alloc from it. */
			nc = cachep->array[cpu];
			cachep->array[cpu] = NULL;
			l3 = cachep->nodelists[node];

			if (!l3)
				goto free_array_cache;

			spin_lock_irq(&l3->list_lock);

			/* Free limit for this kmem_list3 */
			l3->free_limit -= cachep->batchcount;
			if (nc)
				free_block(cachep, nc->entry, nc->avail, node);

			if (!cpus_empty(mask)) {
				spin_unlock_irq(&l3->list_lock);
				goto free_array_cache;
			}

			shared = l3->shared;
			if (shared) {
				free_block(cachep, shared->entry,
					   shared->avail, node);
				l3->shared = NULL;
			}

			alien = l3->alien;
			l3->alien = NULL;

			spin_unlock_irq(&l3->list_lock);

			kfree(shared);
			if (alien) {
				drain_alien_cache(cachep, alien);
				free_alien_cache(alien);
			}
free_array_cache:
			kfree(nc);
		}
		/*
		 * In the previous loop, all the objects were freed to
		 * the respective cache's slabs,  now we can go ahead and
		 * shrink each nodelist to its limit.
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			l3 = cachep->nodelists[node];
			if (!l3)
				continue;
			drain_freelist(cachep, l3, l3->free_objects);
		}
		mutex_unlock(&cache_chain_mutex);
		break;
	}
	return NOTIFY_OK;
bad:
	return NOTIFY_BAD;
}
