/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H

#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>

#ifdef CONFIG_KASAN_HW_TAGS

#include <linux/static_key.h>
#include "../slab.h"

DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);

enum kasan_mode {
	KASAN_MODE_SYNC,
	KASAN_MODE_ASYNC,
	KASAN_MODE_ASYMM,
};

extern enum kasan_mode kasan_mode __ro_after_init;

static inline bool kasan_stack_collection_enabled(void)
{
	return static_branch_unlikely(&kasan_flag_stacktrace);
}

static inline bool kasan_async_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_ASYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sync_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}
#else

static inline bool kasan_stack_collection_enabled(void)
{
	return true;
}

static inline bool kasan_async_fault_possible(void)
{
	return false;
}

static inline bool kasan_sync_fault_possible(void)
{
	return true;
}

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE	MTE_GRANULE_SIZE
#endif

#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
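/*
 * Illustrative arithmetic (a sketch, assuming the common generic-mode setup
 * of 8-byte granules and 4 KB pages): one page of shadow memory covers
 * KASAN_GRANULE_SIZE << PAGE_SHIFT = 8 * 4096 = 32 KB of kernel memory.
 */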

#ifdef CONFIG_KASAN_GENERIC
#define KASAN_FREE_PAGE		0xFF  /* page was freed */
#define KASAN_PAGE_REDZONE	0xFE  /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE	0xFC  /* redzone inside slub object */
#define KASAN_KMALLOC_FREE	0xFB  /* object was freed (kmem_cache_free/kfree) */
#define KASAN_KMALLOC_FREETRACK	0xFA  /* object was freed and has free track set */
#else
#define KASAN_FREE_PAGE		KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
#define KASAN_KMALLOC_REDZONE	KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREE	KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREETRACK	KASAN_TAG_INVALID
#endif

#define KASAN_GLOBAL_REDZONE	0xF9  /* redzone for global variable */
#define KASAN_VMALLOC_INVALID	0xF8  /* unallocated space in vmapped page */

/*
 * Stack redzone shadow values
 * (These are part of the compiler ABI, don't change them)
 */
#define KASAN_STACK_LEFT	0xF1
#define KASAN_STACK_MID		0xF2
#define KASAN_STACK_RIGHT	0xF3
#define KASAN_STACK_PARTIAL	0xF4

/*
 * alloca redzone shadow values
 */
#define KASAN_ALLOCA_LEFT	0xCA
#define KASAN_ALLOCA_RIGHT	0xCB

#define KASAN_ALLOCA_REDZONE_SIZE	32

/*
 * Stack frame marker (compiler ABI).
 */
#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3

/* Don't break randconfig/all*config builds */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif

/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
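/*
 * Illustrative arithmetic for the report layout (a sketch, not an ABI): each
 * metadata row prints META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK = 16 shadow
 * bytes, which with 8-byte generic-mode granules describes
 * META_MEM_BYTES_PER_ROW = 16 * 8 = 128 bytes of memory, and reports show
 * META_ROWS_AROUND_ADDR = 2 such rows on each side of the bad address.
 */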

struct kasan_access_info {
	const void *access_addr;
	const void *first_bad_addr;
	size_t access_size;
	bool is_write;
	unsigned long ip;
};

/* The layout of this struct is dictated by the compiler. */
struct kasan_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* The layout of this struct is dictated by the compiler. */
struct kasan_global {
	const void *beg;		/* Address of the beginning of the global variable. */
	size_t size;			/* Size of the global variable. */
	size_t size_with_redzone;	/* Size of the variable + size of the red zone, 32-byte aligned. */
	const void *name;
	const void *module_name;	/* Name of the module where the global variable is declared. */
	unsigned long has_dynamic_init;	/* This is needed for C++. */
#if KASAN_ABI_VERSION >= 4
	struct kasan_source_location *location;
#endif
#if KASAN_ABI_VERSION >= 5
	char *odr_indicator;
#endif
};

/**
 * Structures to keep alloc and free tracks.
 */

#define KASAN_STACK_DEPTH 64

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;
};

#if defined(CONFIG_KASAN_TAGS_IDENTIFY) && defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_NR_FREE_STACKS 5
#else
#define KASAN_NR_FREE_STACKS 1
#endif

struct kasan_alloc_meta {
	struct kasan_track alloc_track;
#ifdef CONFIG_KASAN_GENERIC
	/*
	 * The auxiliary stack is stored into struct kasan_alloc_meta.
	 * The free stack is stored into struct kasan_free_meta.
	 */
	depot_stack_handle_t aux_stack[2];
#else
	struct kasan_track free_track[KASAN_NR_FREE_STACKS];
#endif
#ifdef CONFIG_KASAN_TAGS_IDENTIFY
	u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
	u8 free_track_idx;
#endif
};

struct qlist_node {
	struct qlist_node *next;
};

/*
 * Generic mode either stores free meta in the object itself or in the redzone
 * after the object. In the former case free meta offset is 0, in the latter
 * case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
 * offset when free meta isn't present.
 */
#define KASAN_NO_FREE_META INT_MAX
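/*
 * A minimal sketch of how the sentinel above is typically consumed (the
 * actual check lives in the KASAN common code, not in this header): a free
 * meta lookup bails out for caches configured without free meta.
 *
 *	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
 *		return NULL;
 */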

struct kasan_free_meta {
#ifdef CONFIG_KASAN_GENERIC
	/* This field is used while the object is in the quarantine.
	 * Otherwise it might be used for the allocator freelist.
	 */
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
#endif
};

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
						const void *object);
#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
						const void *object);
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
		<< KASAN_SHADOW_SCALE_SHIFT);
}
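/*
 * For reference, the above is the inverse of the mem-to-shadow mapping from
 * <linux/kasan.h>; a rough sketch of the forward direction, assuming the
 * usual shadow layout, is:
 *
 *	shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 */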

static inline bool addr_has_metadata(const void *addr)
{
	return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}

/**
 * kasan_check_range - Check memory region, and report if invalid access.
 * @addr: the accessed address
 * @size: the accessed size
 * @write: true if access is a write access
 * @ret_ip: return address
 * @return: true if access was valid, false if invalid
 */
bool kasan_check_range(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip);
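/*
 * A hedged usage sketch (the real callers are the instrumentation hooks in
 * the KASAN runtime); check_load() below is a hypothetical helper, not part
 * of this header:
 *
 *	static void check_load(unsigned long addr, size_t size)
 *	{
 *		kasan_check_range(addr, size, false, _RET_IP_);
 *	}
 */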

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline bool addr_has_metadata(const void *addr)
{
	return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void kasan_print_tags(u8 addr_tag, const void *addr);
#else
static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif

void *kasan_find_first_bad_addr(void *addr, size_t size);
const char *kasan_get_bug_type(struct kasan_access_info *info);
void kasan_metadata_fetch_row(char *buffer, void *row);

#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
void kasan_print_address_stack_frame(const void *addr);
#else
static inline void kasan_print_address_stack_frame(const void *addr) { }
#endif

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);

struct page *kasan_addr_to_page(const void *addr);

depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
void kasan_set_track(struct kasan_track *track, gfp_t flags);
void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag);
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag);

#if defined(CONFIG_KASAN_GENERIC) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
#else
static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
#endif

#ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
	return addr;
}
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr)	0
#endif

#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
#define get_tag(addr)		arch_kasan_get_tag(addr)

#ifdef CONFIG_KASAN_HW_TAGS

#ifndef arch_enable_tagging_sync
#define arch_enable_tagging_sync()
#endif
#ifndef arch_enable_tagging_async
#define arch_enable_tagging_async()
#endif
#ifndef arch_enable_tagging_asymm
#define arch_enable_tagging_asymm()
#endif
#ifndef arch_force_async_tag_fault
#define arch_force_async_tag_fault()
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag()	(0xFF)
#endif
#ifndef arch_get_mem_tag
#define arch_get_mem_tag(addr)	(0xFF)
#endif
#ifndef arch_set_mem_tag_range
#define arch_set_mem_tag_range(addr, size, tag, init) ((void *)(addr))
#endif

#define hw_enable_tagging_sync()		arch_enable_tagging_sync()
#define hw_enable_tagging_async()		arch_enable_tagging_async()
#define hw_enable_tagging_asymm()		arch_enable_tagging_asymm()
#define hw_force_async_tag_fault()		arch_force_async_tag_fault()
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
			arch_set_mem_tag_range((addr), (size), (tag), (init))

#else /* CONFIG_KASAN_HW_TAGS */

#define hw_enable_tagging_sync()
#define hw_enable_tagging_async()
#define hw_enable_tagging_asymm()

#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_enable_tagging_sync(void);
void kasan_force_async_fault(void);

#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_enable_tagging_sync(void) { }
static inline void kasan_force_async_fault(void) { }

#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */

#ifdef CONFIG_KASAN_SW_TAGS
u8 kasan_random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 kasan_random_tag(void) { return hw_get_random_tag(); }
#else
static inline u8 kasan_random_tag(void) { return 0; }
#endif

#ifdef CONFIG_KASAN_HW_TAGS

static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	hw_set_mem_tag_range((void *)addr, size, value, init);
}

static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	/*
	 * Explicitly initialize the memory with the precise object size to
	 * avoid overwriting the SLAB redzone. This disables initialization in
	 * the arch code and may thus lead to a performance penalty. The
	 * penalty is accepted since SLAB redzones aren't enabled in
	 * production builds.
	 */
	if (__slub_debug_enabled() &&
	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
		init = false;
		memzero_explicit((void *)addr, size);
	}
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range((void *)addr, size, tag, init);
}
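/*
 * Worked example for the rounding above (illustrative numbers, not taken
 * from this file): unpoisoning a 60-byte object with 16-byte MTE granules
 * tags round_up(60, 16) = 64 bytes. If slub_debug is enabled and init was
 * requested, the first 60 bytes are zeroed explicitly instead, so the four
 * trailing redzone bytes keep their debug pattern.
 */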

static inline bool kasan_byte_accessible(const void *addr)
{
	u8 ptr_tag = get_tag(addr);
	u8 mem_tag = hw_get_mem_tag((void *)addr);

	return ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag;
}

#else /* CONFIG_KASAN_HW_TAGS */

/**
 * kasan_poison - mark the memory range as inaccessible
 * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size - range size, must be aligned to KASAN_GRANULE_SIZE
 * @value - value that's written to metadata for the range
 * @init - whether to initialize the memory range (only for hardware tag-based)
 *
 * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
 */
void kasan_poison(const void *addr, size_t size, u8 value, bool init);

/**
 * kasan_unpoison - mark the memory range as accessible
 * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size - range size, can be unaligned
 * @init - whether to initialize the memory range (only for hardware tag-based)
 *
 * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
 * marking the range.
 * For the generic mode, the last granule of the memory range gets partially
 * unpoisoned based on the @size.
 */
void kasan_unpoison(const void *addr, size_t size, bool init);
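/*
 * A minimal usage sketch (hypothetical variable names; the real callers are
 * the slab and page allocation hooks in the KASAN runtime): poison an
 * object's redzone, then make the object itself accessible again, both with
 * granule-aligned start addresses.
 *
 *	kasan_poison(redzone_start, redzone_size, KASAN_KMALLOC_REDZONE, false);
 *	kasan_unpoison(object, object_size, false);
 */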

bool kasan_byte_accessible(const void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/**
 * kasan_poison_last_granule - mark the last granule of the memory range as
 * inaccessible
 * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size - range size
 *
 * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
 */
void kasan_poison_last_granule(const void *address, size_t size);
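/*
 * Illustrative example (generic mode, 8-byte granules): for a 70-byte object,
 * the first 64 bytes span eight fully accessible granules, while the last
 * granule's shadow byte is set to 70 % 8 = 6, meaning only its first 6 bytes
 * may be accessed.
 */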

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_poison_last_granule(const void *address, size_t size) { }

#endif /* CONFIG_KASAN_GENERIC */

#ifndef kasan_arch_is_ready
static inline bool kasan_arch_is_ready(void)	{ return true; }
#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
#error kasan_arch_is_ready only works in KASAN generic outline mode!
#endif

/*
 * Exported functions for interfaces called from assembly or from generated
 * code. Declarations here to avoid warnings about missing declarations.
 */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
void __asan_register_globals(struct kasan_global *globals, size_t size);
void __asan_unregister_globals(struct kasan_global *globals, size_t size);
void __asan_handle_no_return(void);
void __asan_alloca_poison(unsigned long addr, size_t size);
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);

void __asan_load1(unsigned long addr);
void __asan_store1(unsigned long addr);
void __asan_load2(unsigned long addr);
void __asan_store2(unsigned long addr);
void __asan_load4(unsigned long addr);
void __asan_store4(unsigned long addr);
void __asan_load8(unsigned long addr);
void __asan_store8(unsigned long addr);
void __asan_load16(unsigned long addr);
void __asan_store16(unsigned long addr);
void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

void __asan_load1_noabort(unsigned long addr);
void __asan_store1_noabort(unsigned long addr);
void __asan_load2_noabort(unsigned long addr);
void __asan_store2_noabort(unsigned long addr);
void __asan_load4_noabort(unsigned long addr);
void __asan_store4_noabort(unsigned long addr);
void __asan_load8_noabort(unsigned long addr);
void __asan_store8_noabort(unsigned long addr);
void __asan_load16_noabort(unsigned long addr);
void __asan_store16_noabort(unsigned long addr);
void __asan_loadN_noabort(unsigned long addr, size_t size);
void __asan_storeN_noabort(unsigned long addr, size_t size);

void __asan_report_load1_noabort(unsigned long addr);
void __asan_report_store1_noabort(unsigned long addr);
void __asan_report_load2_noabort(unsigned long addr);
void __asan_report_store2_noabort(unsigned long addr);
void __asan_report_load4_noabort(unsigned long addr);
void __asan_report_store4_noabort(unsigned long addr);
void __asan_report_load8_noabort(unsigned long addr);
void __asan_report_store8_noabort(unsigned long addr);
void __asan_report_load16_noabort(unsigned long addr);
void __asan_report_store16_noabort(unsigned long addr);
void __asan_report_load_n_noabort(unsigned long addr, size_t size);
void __asan_report_store_n_noabort(unsigned long addr, size_t size);

void __asan_set_shadow_00(const void *addr, size_t size);
void __asan_set_shadow_f1(const void *addr, size_t size);
void __asan_set_shadow_f2(const void *addr, size_t size);
void __asan_set_shadow_f3(const void *addr, size_t size);
void __asan_set_shadow_f5(const void *addr, size_t size);
void __asan_set_shadow_f8(const void *addr, size_t size);

void __hwasan_load1_noabort(unsigned long addr);
void __hwasan_store1_noabort(unsigned long addr);
void __hwasan_load2_noabort(unsigned long addr);
void __hwasan_store2_noabort(unsigned long addr);
void __hwasan_load4_noabort(unsigned long addr);
void __hwasan_store4_noabort(unsigned long addr);
void __hwasan_load8_noabort(unsigned long addr);
void __hwasan_store8_noabort(unsigned long addr);
void __hwasan_load16_noabort(unsigned long addr);
void __hwasan_store16_noabort(unsigned long addr);
void __hwasan_loadN_noabort(unsigned long addr, size_t size);
void __hwasan_storeN_noabort(unsigned long addr, size_t size);

void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);

#endif