/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

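/*
 * Illustrative sketch (hypothetical pointer value, not part of the
 * allocator): the pfmemalloc mark rides in bit 0 of a cached object
 * pointer, which is otherwise always clear because slab objects are at
 * least word aligned.
 */
#if 0
static void obj_pfmemalloc_example(void)
{
	void *objp = (void *)0x1000;		/* any word-aligned object */

	set_obj_pfmemalloc(&objp);		/* objp is now (void *)0x1001 */
	BUG_ON(!is_obj_pfmemalloc(objp));
	clear_obj_pfmemalloc(&objp);		/* back to (void *)0x1000 */
	BUG_ON(is_obj_pfmemalloc(objp));
}
#endif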
/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_NODE (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
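/*
 * Worked example (hypothetical numbers): with SLAB_RED_ZONE and
 * SLAB_STORE_USER enabled on a 64-bit machine, a 40 byte object could be
 * laid out as
 *	[red zone 8][object 40][red zone 8][caller 8]
 * giving obj_offset() == 8 and cachep->size == 64, so dbg_redzone2() and
 * dbg_userword() land on the two trailing words.
 */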
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#define OBJECT_FREE (0)
#define OBJECT_ACTIVE (1)

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void set_obj_status(struct page *page, int idx, int val)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;
	status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;

	return status[idx];
}

#else
static inline void set_obj_status(struct page *page, int idx, int val) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

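/*
 * Worked example (hypothetical numbers): for a cache with size == 256,
 * an object that starts 1024 bytes into the slab yields
 *	obj_to_index() == reciprocal_divide(1024, reciprocal_value(256)) == 4
 * i.e. the same result as 1024 / 256, but computed with a multiply and a
 * shift instead of a hardware divide.
 */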
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		struct kmem_cache_node *n)
{
	struct alien_cache **alc;
	int r;

	lockdep_set_class(&n->list_lock, l3_key);
	alc = n->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache,
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&(alc[r]->lock), alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
	struct kmem_cache_node *n)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, n);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n)
		slab_set_debugobj_lock_classes_node(cachep, n);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache_node *n;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		n = get_node(cache, q);
		if (!n || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, n);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep,
	struct kmem_cache_node *n)
{
	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, n);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;
	struct kmem_cache_node *n;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_kmem_cache_node(cachep, node, n)
		on_slab_lock_classes_node(cachep, n);
}

static inline void __init init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void __init init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep,
	struct kmem_cache_node *n)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
	struct kmem_cache_node *n)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static size_t calculate_freelist_size(int nr_objs, size_t align)
{
	size_t freelist_size;

	freelist_size = nr_objs * sizeof(freelist_idx_t);
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		freelist_size += nr_objs * sizeof(char);

	if (align)
		freelist_size = ALIGN(freelist_size, align);

	return freelist_size;
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t remained_size;
	size_t freelist_size;
	int extra_space = 0;

	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		extra_space = sizeof(char);
	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size + extra_space);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	remained_size = slab_size - nr_objs * buffer_size;
	freelist_size = calculate_freelist_size(nr_objs, align);
	if (remained_size < freelist_size)
		nr_objs--;

	return nr_objs;
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = calculate_freelist_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

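/*
 * Worked example (hypothetical numbers, on-slab freelist, CONFIG_DEBUG_SLAB_LEAK
 * off): for gfporder == 0 (a 4096 byte slab), buffer_size == 256, a 2 byte
 * freelist_idx_t and align == 8, calculate_nr_objs() starts from
 * 4096 / (256 + 2) == 15; the remaining 4096 - 15*256 == 256 bytes hold the
 * ALIGN(15 * 2, 8) == 32 byte freelist, so cache_estimate() reports num == 15
 * and left_over == 224.
 */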
#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

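/*
 * Worked example (hypothetical topology): with nodes 0, 1 and 3 online,
 * successive next_reap_node() calls walk slab_reap_node through
 * 0 -> 1 -> 3 -> 0 -> ..., so each pass of cache_reap() considers the
 * alien cache of a different remote node.
 */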
/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

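/*
 * Sizing note: entry[] is a flexible array member, so one allocation of
 *	sizeof(struct array_cache) + entries * sizeof(void *)
 * bytes (the memsize computed above) covers both the header and room for
 * 'entries' object pointers.
 */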
static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = get_node(cachep, numa_mem_id());
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

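/*
 * Note the common case above: with no memalloc sockets active the
 * allocation fast path is a plain LIFO pop, ac->entry[--ac->avail],
 * handing back the most recently freed (and so most likely cache-hot)
 * object without touching any slab list or lock.
 */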
static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

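/*
 * Usage note: __drain_alien_cache() below uses this to push as many
 * objects as fit into the remote node's shared array,
 *	transfer_objects(n->shared, ac, ac->limit);
 * so only the objects that do not fit have to take the slower
 * free_block() path.
 */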
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *n = get_node(cachep, node);
	LIST_HEAD(list);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, &list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				__drain_alien_cache(cachep, ac, node);
				spin_unlock_irq(&alc->lock);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&alc->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int nodeid = page_to_nid(virt_to_page(objp));
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	int node;
	LIST_HEAD(list);

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(nodeid == node))
		return 0;

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, nodeid);
		}
		ac_put_obj(cachep, ac, objp);
		spin_unlock(&alien->lock);