/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray must not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

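/*
 * Illustrative usage sketch (not part of the original file; "struct foo"
 * and foo_ctor() are hypothetical):
 *
 *	struct kmem_cache *foo_cache =
 *		kmem_cache_create("foo", sizeof(struct foo), 0,
 *				  SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */
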
#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>

#include	<asm/uaccess.h>
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour: SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab
 * via linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/* Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in cache_grow().
 */
static unsigned long offslab_limit;

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};
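
/*
 * Illustrative sketch of the lookup pattern described above (hypothetical
 * subsystem code, not part of this file):
 *
 *	rcu_read_lock();
 *	obj = read_object_address();	/* address obtained locklessly */
 *	spin_lock(&obj->lock);		/* stabilize the object */
 *	rcu_read_unlock();		/* slab page cannot have been reused yet */
 *	if (object_still_at(obj))
 *		... use obj ...
 *	spin_unlock(&obj->lock);
 */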

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <=x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
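
/*
 * Illustrative example (not from the original source): assuming a
 * kmalloc size table beginning 32, 64, 96, 128, ..., the CACHE() chain
 * above folds index_of(100) down to the constant 3 (the 128-byte cache),
 * so INDEX_AC and INDEX_L3 are pure compile-time constants.
 */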

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
/* 3) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG
/*
 * Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
#define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */

/* ...and for poisoning */
#define	POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
#define POISON_FREE	0x6b	/* for use-after-free poisoning */
#define	POISON_END	0xa5	/* end-byte of poisoning */

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
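
/*
 * Illustrative example (not from the original source): with SLAB_RED_ZONE
 * and SLAB_STORE_USER set on a 32-bit box (BYTES_PER_WORD == 4) and a
 * 20-byte object, and assuming no extra alignment padding:
 *
 *	[redzone1][object (20 bytes)][redzone2][caller]
 *	    4B            20B            4B       4B
 *
 * so obj_offset == 4 and buffer_size == 32.
 */
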
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
 * order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and/or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	return (struct slab *)page->lru.prev;
}
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

static inline unsigned int obj_to_index(struct kmem_cache *cache,
					struct slab *slab, void *obj)
{
	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
}
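
/*
 * Illustrative example (not from the original source): with
 * cache->buffer_size == 256, index_to_obj(cache, slab, 3) returns
 * slab->s_mem + 768, and obj_to_index(cache, slab, slab->s_mem + 768)
 * recovers the index 3.
 */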

/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(struct kmem_cache),
#endif
};

/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * vm_enough_memory() looks at this to determine how many slab-allocated pages
 * are possibly freeable under pressure
 *
 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
 */
atomic_t slab_reclaim_pages;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

static DEFINE_PER_CPU(struct work_struct, reap_work);

static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
static int __node_shrink(struct kmem_cache *cachep, int node);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);
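
/*
 * Illustrative example (not from the original source): assuming a size
 * table of 32, 64, 96, 128, ..., kmem_find_general_cachep(100, GFP_KERNEL)
 * walks malloc_sizes[] until 100 <= cs_size and returns the 128-byte
 * cache; with GFP_DMA it would return the matching DMA cache instead.
 */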

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;
	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
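
/*
 * Illustrative example (not from the original source): for an order-0
 * (4096-byte) slab with 128-byte objects kept on-slab, and assuming
 * sizeof(struct slab) == 28, sizeof(kmem_bufctl_t) == 4 and align == 4,
 * the initial guess is (4096 - 28) / (128 + 4) = 30 objects; the
 * management data then occupies ALIGN(28 + 30*4, 4) = 148 bytes, so
 * 148 + 30*128 = 3988 <= 4096 fits and *left_over = 4096 - 3840 - 148 = 108.
 */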

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	__get_cpu_var(reap_node) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	/*
	 * Also drain per cpu pages on remote zones
	 */
	if (node != numa_node_id())
		drain_node_pages(node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct work_struct *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->func == NULL) {
		init_reap_node(cpu);
		INIT_WORK(reap_work, cache_reap, NULL);
		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}
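
/*
 * Illustrative example (not from the original source): if "from" holds 10
 * objects, max is 6 and "to" has room for 4 more entries, nr becomes
 * min(min(10, 6), 4) = 4 and the 4 topmost pointers of "from" are copied
 * onto the top of "to".
 */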

#ifdef CONFIG_NUMA
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				/* unwind the caches allocated so far */
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {