/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
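
/*
 * For example (illustrative only), a zero sized request is not an error:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	kfree(p);				// no-op, same as kfree(NULL)
 */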

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
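
/*
 * Example (illustrative only): creating a cache for a hypothetical
 * struct foo_bar; the struct name and flags below are placeholders,
 * not part of this API.
 *
 *	struct foo_bar {
 *		int id;
 *		struct list_head list;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_bar_cachep;
 *
 *	foo_bar_cachep = KMEM_CACHE(foo_bar, SLAB_HWCACHE_ALIGN);
 *	if (!foo_bar_cachep)
 *		return -ENOMEM;
 */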

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
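
/*
 * Example (illustrative only): whitelisting a single, hypothetical
 * "data" field of struct foo_bar for copy_to_user()/copy_from_user():
 *
 *	foo_bar_cachep = KMEM_CACHE_USERCOPY(foo_bar, 0, data);
 */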

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
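
/*
 * For example (values are illustrative): an arch that defines
 * ARCH_DMA_MINALIGN as 128 gets ARCH_KMALLOC_MINALIGN = 128,
 * KMALLOC_MIN_SIZE = 128 and KMALLOC_SHIFT_LOW = ilog2(128) = 7,
 * so its smallest kmalloc cache is 128 bytes.
 */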

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
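
/*
 * Worked example (illustrative, assuming SLUB with PAGE_SHIFT = 12 and
 * MAX_ORDER = 11): KMALLOC_SHIFT_HIGH = 13, so KMALLOC_MAX_CACHE_SIZE is
 * 8 KiB; KMALLOC_SHIFT_MAX = 22, so KMALLOC_MAX_SIZE is 4 MiB and
 * KMALLOC_MAX_ORDER is 10.  Requests above KMALLOC_MAX_CACHE_SIZE fall
 * through to the page allocator.
 */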

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
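
/*
 * Example (illustrative): allocating and freeing a batch of objects from
 * a hypothetical cache "my_cachep".  A return value of 0 means the bulk
 * allocation failed and no objects are available.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	// ... use objs[0] .. objs[15] ...
 *	kmem_cache_free_bulk(my_cachep, ARRAY_SIZE(objs), objs);
 */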

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
 *   eventually.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			unsigned int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
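
/*
 * Example (illustrative): a small, sleeping allocation with the usual
 * error check; the buffer name and size are placeholders.
 *
 *	char *buf = kmalloc(64, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	kfree(buf);
 */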

/*
 * Determine size used for the nth kmalloc cache.
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
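
/*
 * Example (illustrative): a hypothetical wrapper allocator that wants leak
 * reports attributed to its own callers rather than to the wrapper itself:
 *
 *	void *my_subsys_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */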

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */