/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_NOTRACK | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
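
/*
 * Usage note (illustrative): merging can be disabled from the kernel
 * command line with
 *
 *	slab_nomerge
 *
 * or, when SLUB is the configured allocator, with the alias
 *
 *	slub_nomerge
 *
 * Both parameters are handled by setup_slab_nomerge() above.
 */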

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
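
/*
 * Example (illustrative sketch): callers reach these fallbacks through the
 * exported bulk API.  "my_cachep" is a hypothetical cache created elsewhere
 * with kmem_cache_create().
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		... use objs[0] .. objs[15] ...
 *		kmem_cache_free_bulk(my_cachep, ARRAY_SIZE(objs), objs);
 *	}
 *
 * The bulk allocation is all-or-nothing: as __kmem_cache_alloc_bulk() above
 * shows, any partially allocated objects are freed and 0 is returned on
 * failure.
 */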

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
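
/*
 * Worked example (illustrative, assuming 8-byte pointers and no debug
 * flags): a request for a 60-byte cache with no constructor is rounded up
 * to 64 bytes above.  An existing mergeable cache with s->size == 64, the
 * same SLAB_MERGE_SAME flags and compatible alignment then passes every
 * check (the wasted space 64 - 64 is below sizeof(void *)), so
 * __kmem_cache_alias() can reuse it instead of creating a new cache.
 */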

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
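
/*
 * Worked example (illustrative): with SLAB_HWCACHE_ALIGN, 64-byte cache
 * lines, a requested align of 0 and a 24-byte object, ralign starts at 64
 * and is halved once (24 <= 32), so align becomes 32.  That value is then
 * raised to at least ARCH_SLAB_MINALIGN and rounded up to a multiple of
 * sizeof(void *), leaving such objects 32-byte aligned on a typical
 * configuration.
 */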

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
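
/*
 * Example usage (illustrative sketch; "struct foo" and "foo_cachep" are
 * hypothetical):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */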

static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       css->id, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags, root_cache->ctor,
			 memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.list, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
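
/*
 * Worked examples (illustrative): a 40-byte request uses
 * size_index[size_index_elem(40)] == size_index[4], which is 6 in the
 * default table above, so it is served from kmalloc-64.  A 300-byte
 * request uses fls(299) == 9 and is served from kmalloc-512.  With
 * GFP_DMA and CONFIG_ZONE_DMA the same index picks the matching
 * dma-kmalloc cache instead.
 */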

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL,                      0},		{"kmalloc-96",             96},
	{"kmalloc-192",           192},		{"kmalloc-8",               8},
	{"kmalloc-16",             16},		{"kmalloc-32",             32},
	{"kmalloc-64",             64},		{"kmalloc-128",           128},
	{"kmalloc-256",           256},		{"kmalloc-512",           512},
	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}
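
/*
 * Worked example (illustrative): on a configuration where KMALLOC_MIN_SIZE
 * is 64, the loop above redirects the 8..56 byte entries to
 * KMALLOC_SHIFT_LOW (the 64-byte cache) and the 72..96 byte entries to
 * index 7 (the 128-byte cache), since the 96-byte cache is not used at
 * that alignment.
 */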

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
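
/*
 * Illustrative note (assuming SLUB with 4 KB pages): a request such as
 * kmalloc(64 * 1024, GFP_KERNEL) is larger than KMALLOC_MAX_CACHE_SIZE and
 * is routed here through kmalloc_large(), so kfree() later sees a compound
 * page and hands it back to the page allocator rather than to a slab cache.
 */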

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write          = slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
						&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
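
/*
 * Example usage (illustrative sketch; "buf" and the sizes are arbitrary):
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *	...
 *	new_buf = krealloc(buf, 128, GFP_KERNEL);
 *	if (!new_buf) {
 *		kfree(buf);	(the old buffer is left untouched on failure)
 *		return -ENOMEM;
 *	}
 *	buf = new_buf;
 */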

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
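
/*
 * Example usage (illustrative): kzfree() is meant for buffers holding
 * sensitive data such as key material, where the contents should not
 * linger in freed memory:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);	(zeroes the whole allocation, then frees it)
 */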

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);