/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;	/* the test case expects a KASAN report to fire */
	bool report_found;	/* NOTE(review): presumably set by the reporting path when one fires — confirm in mm/kasan */
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

36
37
38
39
40
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
41

42
int kasan_populate_early_shadow(const void *shadow_start,
43
44
				const void *shadow_end);

45
46
47
48
49
50
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

/* Map/unmap the (read-only) zero shadow for a memory region. */
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

/* No software KASAN mode: there is no zero shadow to insert; report success. */
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	(void)start;
	(void)size;
	return 0;
}
/* No-op counterpart of kasan_add_zero_shadow() when software KASAN is off. */
static inline void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	(void)start;
	(void)size;
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

void kasan_unpoison_shadow(const void *address, size_t size);

void kasan_unpoison_task_stack(struct task_struct *task);

void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);

81
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
82
			slab_flags_t *flags);
Alexander Potapenko's avatar
Alexander Potapenko committed
83

84
85
86
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
87
88
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
89

90
91
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags);
92
void kasan_kfree_large(void *ptr, unsigned long ip);
93
void kasan_poison_kfree(void *ptr, unsigned long ip);
94
95
96
97
void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
					size_t size, gfp_t flags);
void * __must_check kasan_krealloc(const void *object, size_t new_size,
					gfp_t flags);
98

99
100
void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
					gfp_t flags);
101
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
102

/* Per-cache offsets of the KASAN alloc/free metadata inside each object. */
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t __ksize(const void *);
/* Unpoison an allocated object for its full (possibly rounded-up) size. */
static inline void kasan_unpoison_slab(const void *ptr)
{
	kasan_unpoison_shadow(ptr, __ksize(ptr));
}
size_t kasan_metadata_size(struct kmem_cache *cache);

/* Temporarily lift the one-report limit; used by tests. */
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);
#else /* CONFIG_KASAN */

/* !CONFIG_KASAN: all hooks compile away to no-ops. */
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

static inline void kasan_unpoison_task_stack(struct task_struct *task) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

Alexander Potapenko's avatar
Alexander Potapenko committed
130
static inline void kasan_cache_create(struct kmem_cache *cache,
131
				      unsigned int *size,
132
				      slab_flags_t *flags) {}
Alexander Potapenko's avatar
Alexander Potapenko committed
133

134
135
136
137
138
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
139
140
141
142
143
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
/* Stubs: pass allocations through untouched when KASAN is disabled. */
static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
{
	return ptr;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
161

162
163
164
165
166
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
167
168
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
169
170
171
{
	return false;
}
172

173
static inline void kasan_unpoison_slab(const void *ptr) { }
174
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
175

176
177
#endif /* CONFIG_KASAN */

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#ifdef CONFIG_KASAN_SW_TAGS

void kasan_init_tags(void);

void *kasan_reset_tag(const void *addr);

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS */

static inline void kasan_init_tags(void) { }

/* Without tags there is nothing to strip; return the address unchanged. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */

/* Stubs: module shadow is either handled by KASAN vmalloc or not needed. */
static inline int kasan_module_alloc(void *addr, size_t size)
{
	return 0;
}
static inline void kasan_free_shadow(const struct vm_struct *vm)
{
}

#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
/*
 * Hook for page-fault handlers; a no-op unless inline instrumentation is
 * enabled (NOTE(review): presumably used to decode non-canonical shadow
 * accesses in reports — confirm in mm/kasan/report.c).
 */
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */