/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

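/*
 * Unpoisons the shadow memory for 'size' bytes starting from 'addr'.
 * If 'size' is not a multiple of KASAN_SHADOW_SCALE_SIZE, the trailing
 * shadow byte records how many bytes of the last granule are accessible
 * (e.g. size == 13 leaves that byte set to 5).
 */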
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

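/* Unpoison the stack of 'task' from its base up to the given stack pointer. */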
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on memory access size X.
 */

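/*
 * Shadow value 0 means the whole granule is accessible; a positive value
 * N means only the first N bytes are, and a negative value means the
 * granule is fully poisoned, so the byte offset of 'addr' is compared
 * against the (signed) shadow value.
 */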
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

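/* Returns the address of the first non-zero byte in [start, start + size), or 0. */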
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

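/*
 * Returns the address of the first non-zero shadow byte in [start, end):
 * an unaligned prefix and the tail are checked byte by byte, the aligned
 * middle one u64 word at a time.
 */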
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

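/*
 * Reports accesses that fall below the address range covered by shadow
 * memory or that hit poisoned shadow bytes.
 */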
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

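/*
 * Interceptors for memset/memmove/memcpy: check the affected source and
 * destination ranges, then call the uninstrumented __mem*() versions.
 */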
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

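/*
 * Page allocator hooks: newly allocated pages are unpoisoned, freed pages
 * are poisoned with KASAN_FREE_PAGE so later accesses are reported.
 */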
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

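/*
 * Grows the object size to fit the alloc/free metadata and a redzone
 * (capped at KMALLOC_MAX_SIZE); if the metadata doesn't fit, the cache is
 * left without SLAB_KASAN.
 */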
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

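/*
 * Captures the current stack trace, cut off just past the first irq or
 * softirq entry function, and saves it in the stack depot.
 */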
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	return (void *)object;
}

void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	return kasan_kmalloc(cache, object, cache->object_size, flags);
}

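/*
 * Poisons a freed object and, when quarantining, records the free stack
 * and puts the object into the quarantine. Returns true if KASAN has
 * taken ownership of the object (invalid free reported or quarantined).
 */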
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

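/*
 * Unpoisons 'size' bytes of the object, poisons the rest of the slab
 * object as a redzone and records the allocation stack trace.
 */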
void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return (void *)object;
}
EXPORT_SYMBOL(kasan_kmalloc);

void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void *kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return ZERO_SIZE_PTR;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

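/*
 * Allocates shadow for the range [addr, addr + size) via vmalloc and tags
 * the module's vm area with VM_KASAN so kasan_free_shadow() releases the
 * shadow when that area is freed.
 */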
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

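/* Unpoisons a global variable and poisons the redzone that follows it. */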
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

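/*
 * Out-of-line checking functions emitted by the compiler for fixed-size
 * accesses; the *_noabort variants are plain aliases of the same checks.
 */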
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

#ifdef CONFIG_MEMORY_HOTPLUG
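/* Walks the kernel page tables to check whether shadow for 'addr' is already mapped. */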
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

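/*
 * Memory hotplug notifier: maps shadow for memory going online and frees
 * shadow that was mapped here if the memory goes offline again.
 */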
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free shadow.
		 * Non-NULL result of the find_vm_area() will tell us if
		 * that was the second case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif