/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts - 
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
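
/*
 * Illustrative use of the interface described above (a minimal sketch,
 * not part of this file; "struct foo" and "foo_cache" are hypothetical):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	objp = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, objp);
 *	kmem_cache_destroy(foo_cache);
 */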

#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>

#include	<asm/uaccess.h>
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour: SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif
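
/*
 * For example (hypothetical value, for illustration only): an arch that
 * performs DMA into kmalloc'd buffers and needs at least 8 byte alignment
 * would define, in its own headers:
 *
 *	#define ARCH_KMALLOC_MINALIGN	8
 */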

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab
 * into a list of free object indices.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE>>3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)

/* Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in cache_grow().
 */
static unsigned long offslab_limit;

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};
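
/*
 * A sketch of how the free list built from these pieces is walked
 * (illustrative only; the kmem_bufctl_t array lives right behind
 * struct slab and holds, for each object index, the index of the
 * next free object):
 *
 *	kmem_bufctl_t next = slabp->free;
 *	while (next != BUFCTL_END) {
 *		void *objp = slabp->s_mem + cachep->buffer_size * next;
 *		next = ((kmem_bufctl_t *)(slabp + 1))[next];
 *	}
 */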

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];		/*
				 * Must have this definition in here for the proper
				 * alignment of array_cache. Also simplifies accessing
				 * the entries.
				 * [0] is for gcc 2.95. It should really be [].
				 */
};
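
/*
 * A sketch of the LIFO fast path this array enables (illustrative only,
 * simplified from the allocation/free fast paths further down in this
 * file; locking and fallback to the slab lists are omitted):
 *
 *	alloc:	if (ac->avail)
 *			objp = ac->entry[--ac->avail];	(cache-warm object)
 *	free:	if (ac->avail < ac->limit)
 *			ac->entry[ac->avail++] = objp;
 */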

/* bootstrap: The caches do not work without cpuarrays anymore,
 * but the cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned long next_reap;
	int free_touched;
	unsigned int free_limit;
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

/*
 * This function must be completely optimized away if
 * a constant is passed to it. Mostly the same as
 * what is in linux/slab.h except it returns an
 * index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <=x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)	\
	do {	\
		INIT_LIST_HEAD(listp);		\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)			\
	do {					\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;
	unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	unsigned int flags;	/* constant flags */
	unsigned int num;	/* # of objs per slab */
	spinlock_t spinlock;

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;		/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	unsigned int colour_next;	/* cache colouring */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;	/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* destructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/* Optimization question: fewer reaps means less
 * probability for unnecessary cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)	do { if ((x)->num_active > (x)->high_mark) \
					(x)->high_mark = (x)->num_active; \
				} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define	STATS_SET_FREEABLE(x, i) \
				do { if ((x)->max_freeable < i) \
					(x)->max_freeable = i; \
				} while (0)

#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) \
				do { } while (0)

#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG
/* Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
#define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */

/* ...and for poisoning */
#define	POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
#define POISON_FREE	0x6b	/* for use-after-free poisoning */
#define	POISON_END	0xa5	/* end-byte of poisoning */

/* memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages)
 * and absolute limit for the gfp order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/* Functions for storing/retrieving the cachep and/or slab from the
 * global 'mem_map'. These are used to find the slab an obj belongs to.
 * With kfree(), these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

/* These are the default caches for kmalloc. Custom caches can have other sizes. */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);
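
/*
 * A sketch of the expansion above (sizes illustrative; the real list comes
 * from <linux/kmalloc_sizes.h>): with CACHE(32) CACHE(64) ... in that
 * header, the initializer becomes
 *
 *	{ .cs_size = 32 }, { .cs_size = 64 }, ..., { .cs_size = ULONG_MAX }
 */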

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.flags = SLAB_NO_REAP,
	.spinlock = SPIN_LOCK_UNLOCKED,
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(struct kmem_cache),
#endif
};

/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * vm_enough_memory() looks at this to determine how many
 * slab-allocated pages are possibly freeable under pressure
 *
 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
 */
atomic_t slab_reclaim_pages;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

static DEFINE_PER_CPU(struct work_struct, reap_work);

static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
static int __node_shrink(struct kmem_cache *cachep, int node);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/* Calculate the number of objects and left-over bytes for a given
   buffer size. */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
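
/*
 * A worked example of the on-slab case above (illustrative numbers,
 * assuming 4 kB pages, sizeof(struct slab) == 28 and
 * sizeof(kmem_bufctl_t) == 4 on a 32-bit arch):
 *
 *	gfporder = 0, buffer_size = 256, align = 32
 *	nr_objs   = (4096 - 28) / (256 + 4)  = 15
 *	mgmt_size = ALIGN(28 + 15*4, 32)     = 96
 *	check:      96 + 15*256 = 3936 <= 4096, so 15 objects stand
 *	left_over = 4096 - 15*256 - 96       = 160
 *
 * The left-over bytes are what the cache colouring code spreads objects
 * across cache lines with.
 */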

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct work_struct *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->func == NULL) {
		INIT_WORK(reap_work, cache_reap, NULL);
		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

#ifdef CONFIG_NUMA
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;

	for_each_node(i)
	    kfree(ac_ptr[i]);

	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = l3->alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}
#else
#define alloc_alien_cache(node, limit) do { } while (0)
#define free_alien_cache(ac_ptr) do { } while (0)
#define drain_alien_cache(cachep, l3) do { } while (0)
#endif

static int __devinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	int memsize = sizeof(struct kmem_list3);

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&cache_chain_mutex);
		/* we need to do this right in the beginning since
		 * alloc_arraycache's are going to use this list.
		 * kmalloc_node allows us to add the slab to the right
		 * kmem_list3 and not this cpu's kmem_list3
		 */

		list_for_each_entry(cachep, &cache_chain, next) {
			/* setup the size64 kmemlist for cpu before we can
			 * begin anything. Make sure some other cpu on this
			 * node has not already allocated this
			 */
			if (!cachep->nodelists[node]) {
				if (!(l3 = kmalloc_node(memsize,
							GFP_KERNEL, node)))
					goto bad;
				kmem_list3_init(l3);
				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

				cachep->nodelists[node] = l3;
			}

			spin_lock_irq(&cachep->nodelists[node]->list_lock);
			cachep->nodelists[node]->free_limit =
			    (1 + nr_cpus_node(node)) *
			    cachep->batchcount + cachep->num;
			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
		}

		/* Now we can go ahead with allocating the shared arrays
		   & array caches */
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;

			nc = alloc_arraycache(node, cachep->limit,
					      cachep->batchcount);
			if (!nc)
				goto bad;
			cachep->array[cpu] = nc;

			l3 = cachep->nodelists[node];
			BUG_ON(!l3);
			if (!l3->shared) {
				if (!(nc = alloc_arraycache(node,
							    cachep->shared *
							    cachep->batchcount,
							    0xbaadf00d)))
					goto bad;

				/* we are serialised from CPU_DEAD or
				   CPU_UP_CANCELLED by the cpucontrol lock */
				l3->shared = nc;
			}
		}
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_ONLINE:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/* fall thru */
	case CPU_UP_CANCELED:
		mutex_lock(&cache_chain_mutex);

		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			cpumask_t mask;

			mask = node_to_cpumask(node);
			spin_lock_irq(&cachep->spinlock);
			/* cpu is dead; no one can alloc from it. */
			nc = cachep->array[cpu];
			cachep->array[cpu] = NULL;
			l3 = cachep->nodelists[node];

			if (!l3)
				goto unlock_cache;

			spin_lock(&l3->list_lock);

			/* Free limit for this kmem_list3 */
			l3->free_limit -= cachep->batchcount;
			if (nc)
				free_block(cachep, nc->entry, nc->avail, node);

			if (!cpus_empty(mask)) {
				spin_unlock(&l3->list_lock);
				goto unlock_cache;
			}

			if (l3->shared) {
				free_block(cachep, l3->shared->entry,
					   l3->shared->avail, node);
				kfree(l3->shared);
				l3->shared = NULL;
			}
			if (l3->alien) {
				drain_alien_cache(cachep, l3);
				free_alien_cache(l3->alien);
				l3->alien = NULL;
			}

			/* free slabs belonging to this node */
			if (__node_shrink(cachep, node)) {
				cachep->nodelists[node] = NULL;
				spin_unlock(&l3->list_lock);
				kfree(l3);
			} else {
				spin_unlock(&l3->list_lock);
			}
		      unlock_cache:
			spin_unlock_irq(&cachep->spinlock);
			kfree(nc);
		}
		mutex_unlock(&cache_chain_mutex);
		break;
#endif
	}
	return NOTIFY_OK;
      bad:
	mutex_unlock(&cache_chain_mutex);
	return NOTIFY_BAD;
}

static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };

/*
 * swap the static kmem_list3 with kmalloced memory
 */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
{
	struct kmem_list3 *ptr;

	BUG_ON(cachep->nodelists[nodeid] != list);
	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
	BUG_ON(!ptr);

	local_irq_disable();
	memcpy(ptr, list, sizeof(struct kmem_list3));
	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->nodelists[nodeid] = ptr;
	local_irq_enable();
}

/* Initialisation.
 * Called after the gfp() functions have been enabled, and before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct kmem_cache
	 *    structures of all caches, except cache_cache itself: cache_cache
	 *    is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the cache_cache */
	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];

	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());

	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
		       &left_over, &cache_cache.num);
	if (!cache_cache.num)
		BUG();

	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.colour_next = 0;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());

	/* 2+3) create the kmalloc caches */
	sizes = malloc_sizes;
	names = cache_names;

	/* Initialize the caches that provide memory for the array cache
	 * and the kmem_list3 structures first.
	 * Without this, further allocations will bug
	 */

	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
						      sizes[INDEX_AC].cs_size,
						      ARCH_KMALLOC_MINALIGN,
						      (ARCH_KMALLOC_FLAGS |
						       SLAB_PANIC), NULL, NULL);

	if (INDEX_AC != INDEX_L3)
		sizes[INDEX_L3].cs_cachep =
		    kmem_cache_create(names[INDEX_L3].name,
				      sizes[INDEX_L3].cs_size,
				      ARCH_KMALLOC_MINALIGN,
				      (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
				      NULL);

	while (sizes->cs_size != ULONG_MAX) {
		/*
		 * For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
		if (!sizes->cs_cachep)
			sizes->cs_cachep = kmem_cache_create(names->name,
							     sizes->cs_size,
							     ARCH_KMALLOC_MINALIGN,
							     (ARCH_KMALLOC_FLAGS
							      | SLAB_PANIC),
							     NULL, NULL);

		/* Inc off-slab bufctl limit until the ceiling is hit. */
		if (!(OFF_SLAB(sizes->cs_cachep))) {
			offslab_limit = sizes->cs_size - sizeof(struct slab);
			offslab_limit /= sizeof(kmem_bufctl_t);
		}

		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
							sizes->cs_size,
							ARCH_KMALLOC_MINALIGN,
							(ARCH_KMALLOC_FLAGS |
							 SLAB_CACHE_DMA |
							 SLAB_PANIC), NULL,
							NULL);

		sizes++;
		names++;
	}
	/* 4) Replace the bootstrap head arrays */
	{
		void *ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		local_irq_disable();
		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
		memcpy(ptr, cpu_cache_get(&cache_cache),
		       sizeof(struct arraycache_init));
		cache_cache.array[smp_processor_id()] = ptr;
		local_irq_enable();

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		local_irq_disable();
		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
		       sizeof(struct arraycache_init));
		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
		    ptr;
		local_irq_enable();
	}
	/* 5) Replace the bootstrap kmem_list3's */
	{
		int node;
		/* Replace the static kmem_list3 structures for the boot cpu */
		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
			  numa_node_id());

		for_each_online_node(node) {
			init_list(malloc_sizes[INDEX_AC].cs_cachep,
				  &initkmem_list3[SIZE_AC + node], node);

			if (INDEX_AC != INDEX_L3) {
				init_list(malloc_sizes[INDEX_L3].cs_cachep,
					  &initkmem_list3[SIZE_L3 + node],
					  node);
			}
		}
	}

	/* 6) resize the head arrays to their final sizes */
	{
		struct kmem_cache *cachep;
		mutex_lock(&cache_chain_mutex);
		list_for_each_entry(cachep, &cache_chain, next)
		    enable_cpucache(cachep);
		mutex_unlock(&cache_chain_mutex);
	}

	/* Done! */
	g_cpucache_up = FULL;

	/* Register a cpu startup notifier callback
	 * that initializes cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

	/* The reap timers are started later, with a module init call:
	 * That part of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/* 
	 * Register the timers that return unneeded
	 * pages to gfp.
	 */
	for_each_online_cpu(cpu)
	    start_cpu_timer(cpu);

	return 0;
}

__initcall(cpucache_init);

/*
 * Interface to system's page allocator. No need to hold the cache-lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	struct page *page;
	void *addr;
	int i;

	flags |= cachep->gfpflags;
	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page)
		return NULL;
	addr = page_address(page);

	i = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		atomic_add(i, &slab_reclaim_pages);
	add_page_state(nr_slab, i);
	while (i--) {
		SetPageSlab(page);
		page++;
	}
	return addr;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
	unsigned long i = (1 << cachep->gfporder);
	struct page *page = virt_to_page(addr);
	const unsigned long nr_freed = i;

	while (i--) {
		if (!TestClearPageSlab(page))
			BUG();
		page++;
	}
	sub_page_state(nr_slab, nr_freed);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	free_pages((unsigned long)addr, cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
	struct kmem_cache *cachep = slab_rcu->cachep;

	kmem_freepages(cachep, slab_rcu->addr);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slab_rcu);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = obj_size(cachep);

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = obj_size(cachep);
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	printk(KERN_ERR "%03x:", offset);
	for (i = 0; i < limit; i++) {
		printk(" %02x", (unsigned char)data[offset + i]);
	}
	printk("\n");
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>]",
		       *dbg_userword(cachep, objp));
		print_symbol("(%s)",
			     (unsigned long)*dbg_userword(cachep, objp));
		printk("\n");
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
				       "Slab corruption: start=%p, len=%d\n",
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {