// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

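/*
 * Save the current call stack in the stack depot and return a handle to it.
 * filter_irq_stacks() cuts the trace at the first interrupt entry so that
 * frames of the interrupted task do not end up in the saved trace.
 */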
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	unpoison_range(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}

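/*
 * Page allocator hooks: on allocation, tag every page of the allocation with
 * the same (random, under SW_TAGS) tag and unpoison the whole range; on free,
 * poison the range with KASAN_FREE_PAGE so later accesses are reported as
 * use-after-free. Highmem pages have no KASAN metadata and are skipped.
 */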
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
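/*
 * For example, a 96-byte object falls into the "object_size <= 128 - 32"
 * bucket below and gets a 32-byte redzone, while a 3000-byte object gets a
 * 128-byte one. Software tag-based KASAN detects out-of-bounds accesses via
 * pointer tags rather than redzones, hence the early return.
 */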
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

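/*
 * kasan_cache_create() reserves extra space past the object for KASAN
 * metadata: a struct kasan_alloc_meta is always appended, and a struct
 * kasan_free_meta is appended only when it cannot live inside the freed
 * object itself (SLAB_TYPESAFE_BY_RCU caches, caches with constructors, or
 * objects smaller than the struct). The size is then grown, if needed, so
 * that at least optimal_redzone() bytes follow the object.
 *
 * For illustration only, assuming a 64-byte object with no constructor,
 * *size == 64 on entry and sizeof(struct kasan_alloc_meta) == 16:
 * alloc_meta_offset becomes 64, *size grows to 80, the free meta stays
 * inside the object, and the 32-byte optimal redzone pushes *size to 96.
 */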
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

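/*
 * Poison a freshly allocated slab page: reset the KASAN tag of every page
 * (the pages may still carry tags from a previous user) and mark the whole
 * range as KASAN_KMALLOC_REDZONE. Objects are unpoisoned individually later
 * (see kasan_unpoison_object_data() and kasan_slab_alloc()).
 */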
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

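/*
 * Common free path. Reports an invalid free when the pointer does not match
 * the start of an object or when the object is already poisoned, leaves
 * KFENCE objects and SLAB_TYPESAFE_BY_RCU caches alone, and otherwise poisons
 * the object. Returning true tells the slab allocator not to free the object
 * yet, either because the free was invalid or because the object has been
 * placed into the generic-KASAN quarantine.
 */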
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

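/*
 * Common allocation path for kasan_slab_alloc(), kasan_kmalloc() and
 * kasan_krealloc(). The first size bytes of the object are unpoisoned (with
 * a freshly assigned tag under SW_TAGS) and the rest of the slot, up to
 * cache->object_size rounded to the KASAN granule, is poisoned as a right
 * redzone:
 *
 *	[ accessible (size bytes) | redzone up to cache->object_size ]
 */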
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

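/*
 * Hook for kmalloc() allocations that are served directly by the page
 * allocator rather than by a slab cache: unpoison the first size bytes and
 * poison the rest of the compound page as KASAN_PAGE_REDZONE.
 */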
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

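/*
 * Poison an object without returning it to the allocator. Page-allocator
 * backed pointers must point at the start of the page and get the whole page
 * poisoned; slab objects go through __kasan_slab_free() with quarantining
 * disabled.
 */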
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}