// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	if (kasan_flag_async) {
		kunit_err(test, "can't run KASAN tests in async mode");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	hw_set_tagging_report_once(false);

	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	hw_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a tag fault happens, tag checking is
 * normally auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))			\
		migrate_disable();				\
	WRITE_ONCE(fail_data.report_expected, true);		\
	WRITE_ONCE(fail_data.report_found, false);		\
	kunit_add_named_resource(test,				\
				NULL,				\
				NULL,				\
				&resource,			\
				"kasan_data", &fail_data);	\
	barrier();						\
	expression;						\
	barrier();						\
	KUNIT_EXPECT_EQ(test,					\
			READ_ONCE(fail_data.report_expected),	\
			READ_ONCE(fail_data.report_found));	\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {			\
		if (READ_ONCE(fail_data.report_found))		\
			hw_enable_tagging_sync();		\
		migrate_enable();				\
	}							\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN, page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void kmalloc_oob_krealloc_more(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}

static void kmalloc_oob_krealloc_less(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 15;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

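/*
 * Check that a 16-byte copy from a freed object is reported as a
 * use-after-free.
 */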
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

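/*
 * Check that memmove() with a negative size is detected: cast to size_t
 * the size becomes huge, and the resulting out-of-bounds access should
 * produce a KASAN report.
 */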
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN, ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

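/*
 * Check that freeing an object through a pointer recomputed from its
 * struct page and offset does not produce a false-positive report.
 */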
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

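/*
 * Check that freeing an object through a pointer recomputed from its
 * physical address does not produce a false-positive report.
 */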
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

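/*
 * Check that bulk-allocated cache objects can be written to and freed in
 * bulk without producing KASAN reports.
 */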
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	volatile int i = OOB_TAG_OFF;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger an invalid free; the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, these accesses are still out-of-bounds, since bitops are
	 * defined to operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * The calls below try to access a bit beyond the allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

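/* Check that a second kfree_sensitive() on the same object is reported. */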
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_oob_krealloc_more),
	KUNIT_CASE(kmalloc_oob_krealloc_less),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),