/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
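
/*
 * A worked example of the encoding above (a sketch, assuming the
 * generic KASAN_SHADOW_SCALE_SIZE of 8): unpoisoning 13 bytes zeroes
 * the shadow byte covering bytes 0-7 and stores
 * 13 & KASAN_SHADOW_MASK == 5 in the next shadow byte, meaning only
 * the first 5 bytes of that granule are accessible.
 */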

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}
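
/*
 * Note: the masking above relies on THREAD_SIZE being a power of two
 * and on kernel stacks being THREAD_SIZE-aligned, so clearing the low
 * log2(THREAD_SIZE) bits of any pointer into the stack yields the
 * stack base.
 */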

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
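
/*
 * Example for the check above, assuming an 8-byte granule: if the
 * shadow byte holds 5 (only the first 5 bytes accessible), a 1-byte
 * access at offset 4 within the granule is fine (4 < 5), while one at
 * offset 5 is reported (5 >= 5).
 */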

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * Access crosses an 8-byte (shadow granule) boundary. Such an
	 * access maps into two shadow bytes, so we need to check both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
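
/*
 * Example for the boundary test above: a 4-byte access starting at
 * offset 6 of a granule ends at offset 1 of the next one, so
 * ((addr + 3) & KASAN_SHADOW_MASK) == 1 < 3 and both shadow bytes are
 * checked.
 */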

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
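
/*
 * The scan above handles an unaligned prefix byte-by-byte, then whole
 * 8-byte words, then the remaining tail, so large shadow regions are
 * mostly traversed one u64 at a time.
 */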

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}
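
/*
 * Example: a 100-byte object falls into the "object_size <= 448"
 * bucket above and gets a 64-byte redzone, while an 8192-byte object
 * gets a 256-byte one.
 */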

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			slab_flags_t *flags)
{
	int redzone_adjust;
	size_t orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
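
/*
 * The resulting object layout (a sketch): the object itself, then
 * struct kasan_alloc_meta at alloc_meta_offset, then, when needed,
 * struct kasan_free_meta at free_meta_offset, padded out to at least
 * the redzone size chosen by optimal_redzone().
 */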

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}
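
/*
 * Frames below the interrupt entry belong to the interrupted task, not
 * to the interrupt itself; trimming them keeps otherwise-identical
 * interrupt stack traces identical, which helps the stack depot
 * deduplicate them.
 */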

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}
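
/*
 * depot_save_stack() deduplicates traces and hands back a compact
 * handle, so each kasan_track stores a single depot_stack_handle_t
 * rather than a full copy of the trace.
 */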

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
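
/*
 * Returning true above tells the slab allocator that KASAN took
 * ownership of the object: instead of being freed immediately it is
 * held in the quarantine (quarantine_put()) until quarantine_reduce()
 * or quarantine_remove_cache() drains it, which is what keeps
 * use-after-free bugs detectable for a while after kfree().
 */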

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
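
/*
 * Worked example for kasan_kmalloc() above: for a kmalloc(100) from a
 * 128-byte cache, bytes 0-99 are unpoisoned, the redzone starts at
 * round_up(100, 8) == 104, bytes 104-127 are poisoned with
 * KASAN_KMALLOC_REDZONE, and bytes 100-103 are covered by the partial
 * shadow byte written by kasan_unpoison_shadow().
 */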

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		kasan_poison_slab_free(page->slab_cache, ptr);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
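
/*
 * Shadow for a module's vmalloc area is allocated here at module load
 * time and released again by kasan_free_shadow() below; VM_KASAN marks
 * the areas that own such shadow.
 */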

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
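
/*
 * With outline instrumentation the compiler emits a call to one of the
 * hooks above before each memory access, e.g. a 4-byte load of p
 * becomes roughly:
 *
 *	__asan_load4((unsigned long)p);
 *	val = *p;
 *
 * The _noabort aliases exist because the compiler may emit either name
 * depending on its version and flags.
 */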

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
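
/*
 * The byte values above correspond to the compiler's stack redzone
 * markers (f1/f2/f3 for left/middle/right stack redzones, f8 for
 * use-after-scope, 00 to mark memory accessible again); the compiler
 * calls these helpers instead of open-coding the shadow memset when
 * instrumenting local variables.
 */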

#ifdef CONFIG_MEMORY_HOTPLUG
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_OFFLINE:
		vfree((void *)shadow_start);
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif