// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */

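/*
 * Each shadow byte tracks one KASAN_SHADOW_SCALE_SIZE-byte (8-byte) granule:
 * 0 means the whole granule is accessible, a value k in 1..7 means only the
 * first k bytes are accessible, and a negative value marks the granule as
 * poisoned. For example, a 5-byte object leaves shadow == 5, so touching
 * its byte at offset 6 gives (addr & KASAN_SHADOW_MASK) == 6 >= 5 and the
 * access is flagged as bad.
 */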
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

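/*
 * A 2-, 4- or 8-byte access spans at most two granules. Within one granule
 * accessibility is always a prefix, so if the last byte of the access is
 * accessible, every earlier byte in that granule is too; only an access
 * that crosses an 8-byte boundary needs its first shadow byte checked as
 * well.
 */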
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8-byte (shadow granule) boundary. Such an
	 * access maps into two shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

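/*
 * An aligned 16-byte access covers exactly two granules, so both shadow
 * bytes can be tested with a single u16 load; an unaligned one spills into
 * a third granule, which is checked via its last byte.
 */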
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

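/*
 * Scan a shadow range for a non-zero byte: check the unaligned head and
 * tail bytes individually and the aligned middle one u64 word at a time.
 * Returns the address of the first non-zero shadow byte, or 0 if the whole
 * range is zero (i.e. fully accessible).
 */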
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

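/*
 * Check an arbitrary-size access. A non-zero shadow byte is only a real
 * violation if it is not the last granule's shadow byte, or if it is and
 * the access's last byte reaches past the accessible prefix.
 */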
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

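/*
 * Dispatch on the (usually compile-time-constant) access size so that each
 * __asan_loadX/__asan_storeX entry point inlines only the check it
 * actually needs.
 */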
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

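/*
 * The common entry point for every instrumented access: reject wrapping
 * ranges and addresses below the shadow-mapped start, then consult the
 * shadow. Returns true when the access is fine, false after reporting a
 * bad one.
 */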
static __always_inline bool check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool check_memory_region(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	return check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

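/*
 * A compiler-emitted kasan_global descriptor covers the object itself plus
 * a trailing redzone. Unpoison the object and mark everything from its
 * aligned end up to size_with_redzone as KASAN_GLOBAL_REDZONE so
 * out-of-bounds accesses to the global are caught.
 */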
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	unpoison_range(global->beg, global->size);

	poison_range(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

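/*
 * Generate the fixed-size hooks the compiler emits around each memory
 * access, e.g. __asan_load4(addr) before a 4-byte read. The _noabort
 * variants, which the compiler emits when instrumenting in recover mode,
 * are plain aliases here: the kernel's report path decides what to do.
 */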
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

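/*
 * Dynamic allocas get a KASAN_ALLOCA_REDZONE_SIZE-byte redzone on each
 * side of the rounded-up object:
 *
 *	[left redzone][object .. partial granule][padding + right redzone]
 *
 * The partial last granule is unpoisoned byte-precisely via
 * unpoison_range(), and each redzone gets a dedicated shadow value so a
 * report can say which side was overrun.
 */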
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	unpoison_range((const void *)(addr + rounded_down_size),
		       size - rounded_down_size);
	poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT);
	poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	unpoison_range(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

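/*
 * The shadow byte values match the userspace ASan stack markers: 00 is
 * fully addressable, f1/f2/f3 are the left/middle/right stack redzones,
 * f5 is stack-after-return and f8 is stack-use-after-scope.
 */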
DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

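/*
 * Attach an auxiliary stack trace to a slab object's alloc metadata, e.g.
 * the call_rcu() callsite, so a report can show how a freed object came to
 * be reused. Only the two most recent auxiliary stacks are kept.
 */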
void kasan_record_aux_stack(void *addr)
{
	struct page *page = kasan_addr_to_page(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_info;
	void *object;

	if (is_kfence_address(addr) || !(page && PageSlab(page)))
		return;

	cache = page->slab_cache;
	object = nearest_obj(cache, page, addr);
	alloc_info = get_alloc_info(cache, object);

	/*
	 * Record the last two call_rcu() call stacks.
	 */
	alloc_info->aux_stack[1] = alloc_info->aux_stack[0];
	alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}

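/*
 * On free, record the freeing stack in the object's kasan_free_meta and
 * tag the object's first shadow byte with KASAN_KMALLOC_FREETRACK so
 * kasan_get_free_track() knows the metadata is valid.
 */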
void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_free_meta *free_meta;

	free_meta = get_free_info(cache, object);
	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);

	/*
	 * The object was freed and has its free track set.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag)
{
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
		return NULL;
	return &get_free_info(cache, object)->free_track;
}