// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *	A. page->freelist	-> List of objects free in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive 		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab.  These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}
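
/*
 * Illustrative note (not part of the original source): with
 * CONFIG_SLAB_FREELIST_HARDENED the stored link is ptr ^ s->random ^ ptr_addr,
 * and decoding just applies the same XOR again, e.g. for a hypothetical slot:
 *
 *	stored  = (unsigned long)next ^ s->random ^ (unsigned long)&slot;
 *	decoded = stored ^ s->random ^ (unsigned long)&slot;	(== next)
 *
 * The intent is that a single leaked freelist link is not directly usable
 * without also knowing the per-cache random value and the slot address.
 */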

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	if (object)
		prefetch(freelist_dereference(s, object + s->offset));
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
		__idx <= __objects; \
		__p += (__s)->size, __idx++)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}
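
/*
 * Example (illustrative only): with s->size == 256 and the first object at
 * addr, the object at addr + 3 * 256 has slab_index() == 3; for_each_object()
 * above visits the same positions in address order, starting past the left
 * red zone when red zoning is enabled.
 */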

static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
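
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for order 1,
 * size 256 and no reserved bytes, order_objects() yields 8192 / 256 == 32,
 * so oo_make() stores x.x == (1 << OO_SHIFT) + 32.  oo_order() then recovers
 * 1 from the high bits and oo_objects() recovers 32 from the low OO_MASK bits.
 */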

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;
	tmp.counters = counters_new;
	/*
	 * page->counters can cover frozen/inuse/objects as well
	 * as page->_refcount.  If we assign to ->counters directly
	 * we run the risk of losing updates to page->_refcount, so
	 * be careful and only assign to the fields we need.
	 */
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
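
/*
 * Typical usage sketch (added for illustration, not a quote from a caller):
 * the freelist/counters pair is re-read and the update retried until it can
 * be applied atomically, e.g.
 *
 *	do {
 *		prior = page->freelist;
 *		counters = page->counters;
 *		... compute new freelist / new counters ...
 *	} while (!cmpxchg_double_slab(s, page, prior, counters,
 *					new_freelist, new_counters,
 *					"example caller"));
 */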

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node list_lock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}
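
/*
 * Note (added for clarity): a set bit in @map marks an object that is on the
 * page's freelist, i.e. currently free; callers treat clear bits as
 * allocated objects when scanning the slab.
 */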

static inline int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}
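
/*
 * Note (added for clarity): with SLAB_RED_ZONE the object pointers passed
 * around inside SLUB point just past the left red zone (see fixup_red_left()),
 * so size_from_object() and restore_red_left() convert back to the size and
 * start of the raw slot when its metadata has to be examined.
 */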

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}
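
/*
 * Note (added for clarity): the TRACK_ALLOC and TRACK_FREE records form a
 * pair placed right after the free pointer when that pointer lives outside
 * the object (s->offset != 0), or at s->inuse otherwise; get_track() simply
 * indexes into that pair.
 */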

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		metadata_access_enable();
		save_stack_trace(&trace);
		metadata_access_disable();

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned long, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
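
/*
 * Rough picture of a debug-enabled object, added purely as an illustration
 * (actual widths depend on the cache's flags and alignment):
 *
 *	[left red zone][object .. object_size][red zone .. inuse]
 *	[free pointer, unless overlaid][track alloc][track free][padding .. size]
 */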

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	metadata_access_enable();
	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section(KERN_ERR, "Padding ", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->lru, &n->full);
	list_add(&page->lru, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->lru);
	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */