/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
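/*
 * Illustrative sketch only (struct my_obj and my_obj_cache are
 * hypothetical, not part of this header): a cache meant for the
 * lockless lookup pattern above would typically pair this flag with a
 * per-object key and reference count:
 *
 *  struct my_obj {
 *  	unsigned long key;
 *  	atomic_t refcnt;
 *  };
 *
 *  my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *  				     0, SLAB_DESTROY_BY_RCU, NULL);
 *
 * Readers must still revalidate obj->key and take a reference under
 * rcu_read_lock(), since a freed slot may be reused for a new my_obj
 * within the same grace period.
 */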
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
#else
# define SLAB_ACCOUNT		0x00000000UL
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		0x08000000UL
#else
#define SLAB_KASAN		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
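/*
 * For illustration (struct foo and foo_cache are hypothetical), a cache
 * for "struct foo" could be created and used like this, with error
 * handling elided:
 *
 *  static struct kmem_cache *foo_cache;
 *
 *  foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *  f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *  ...
 *  kmem_cache_free(foo_cache, f);
 *  kmem_cache_destroy(foo_cache);
 *
 * The KMEM_CACHE() line expands to kmem_cache_create("foo",
 * sizeof(struct foo), __alignof__(struct foo), SLAB_HWCACHE_ALIGN, NULL).
 */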

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
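/*
 * Examples of the mapping above, assuming KMALLOC_MIN_SIZE is 8
 * (KMALLOC_SHIFT_LOW == 3): kmalloc_index(8) == 3 (8-byte cache),
 * kmalloc_index(100) == 7 (128-byte cache) and kmalloc_index(80) == 1
 * (the special 96-byte cache). kmalloc_index(0) returns 0, which
 * callers such as kmalloc() translate into ZERO_SIZE_PTR.
 */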

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
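/*
 * Minimal usage sketch (illustrative; "cachep" and "objs" are
 * placeholders, error handling kept simple):
 *
 *  void *objs[16];
 *  int got;
 *
 *  got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *  if (!got)
 *  	return -ENOMEM;
 *  ...
 *  kmem_cache_free_bulk(cachep, got, objs);
 *
 * kmem_cache_alloc_bulk() returns 0 on failure, otherwise the number of
 * objects placed in the array.
 */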

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
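/*
 * Typical use (illustrative; "buf" and "len" are placeholders):
 *
 *  buf = kmalloc(len, GFP_KERNEL);
 *  if (!buf)
 *  	return -ENOMEM;
 *  ...
 *  kfree(buf);
 *
 * When the size is a compile-time constant no larger than
 * KMALLOC_MAX_CACHE_SIZE (and GFP_DMA is not requested), the call above
 * resolves to kmem_cache_alloc_trace() on the matching kmalloc cache on
 * !CONFIG_SLOB configurations.
 */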

/*
 * Determine size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
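/*
 * Illustrative use: place per-node data close to the CPUs that will
 * touch it (the node id would typically come from cpu_to_node()):
 *
 *  data = kmalloc_node(sizeof(*data), GFP_KERNEL, node);
 *  if (!data)
 *  	return -ENOMEM;
 *
 * On !CONFIG_NUMA kernels this degenerates into a plain kmalloc().
 */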

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @root_cache: pointer to the global, root cache, this cache was derived from
 *
 * Both root and child caches of the same kind are linked into a list chained
 * through @list.
 */
struct memcg_cache_params {
	bool is_root_cache;
	struct list_head list;
	union {
		struct memcg_cache_array __rcu *memcg_caches;
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
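/*
 * Illustrative use ("tbl" and "nr_entries" are placeholders): both
 * helpers reject multiplications that would overflow, so they are
 * preferred over an open-coded kmalloc(n * size):
 *
 *  tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *  if (!tbl)
 *  	return -ENOMEM;
 *
 * If nr_entries * sizeof(*tbl) would exceed SIZE_MAX, the call returns
 * NULL instead of silently allocating a short buffer.
 */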

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
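/*
 * Illustrative use (my_zalloc is hypothetical): a wrapper allocator that
 * wants leak reports attributed to its own callers rather than to the
 * wrapper itself:
 *
 *  void *my_zalloc(size_t size)
 *  {
 *  	return kmalloc_track_caller(size, GFP_KERNEL | __GFP_ZERO);
 *  }
 */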

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */