/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;
#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif
typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */
#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);
#ifndef __HAVE_ARCH_SHADOW_MAP
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
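
/*
 * Illustrative note, not part of the original header: with generic KASAN,
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte describes an 8-byte
 * granule and the mapping above is simply
 *
 *	shadow = (addr >> 3) + KASAN_SHADOW_OFFSET;
 *
 * A shadow byte of 0 means the whole granule is accessible, a value of
 * 1..7 means only that many leading bytes are accessible, and negative
 * values encode redzone/freed-memory poison markers.  The actual access
 * checks live in mm/kasan/generic.c.
 */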
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
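
/*
 * Usage sketch (an addition for illustration, with a hypothetical helper):
 * code that must legitimately touch memory KASAN considers poisoned, for
 * example a debug dump of neighbouring slab objects, can suppress reports
 * around the access:
 *
 *	kasan_disable_current();
 *	dump_possibly_poisoned_object(ptr);	// hypothetical helper
 *	kasan_enable_current();
 *
 * These calls nest (they adjust a per-task depth counter) and only apply
 * to the software KASAN modes declared in this branch.
 */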
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}
void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}
void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}
bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}
void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}
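
/*
 * Rough sketch of how the slab allocator is expected to drive the
 * alloc/free hooks above; the authoritative call sites are in mm/slub.c
 * (and mm/slab.c), this is only an illustration:
 *
 *	// allocation path
 *	object = kasan_slab_alloc(s, object, flags, init);
 *
 *	// free path
 *	if (kasan_slab_free(s, object, init))
 *		return;	// KASAN kept the object (e.g. quarantine), don't free
 *
 * A true return from kasan_slab_free() means the object must not be
 * released to the allocator yet.
 */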
void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}
void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
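
/*
 * Usage sketch: kasan_check_byte() lets a caller probe whether an address
 * may be touched at all, instead of provoking the report from inside the
 * access.  ksize() follows roughly this pattern:
 *
 *	if (unlikely(!kasan_check_byte(object)))
 *		return 0;	// freed/invalid object, a report was printed
 *
 * This works even in HW_TAGS mode, which has no compiler-inserted checks.
 */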
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}
#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif
#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif
#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}
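
/*
 * Illustrative sketch of how the KASAN_VMALLOC_* flags defined at the top
 * of this header are combined by vmalloc; it loosely mirrors
 * __vmalloc_node_range() in mm/vmalloc.c and is not a copy of it:
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_VM_ALLOC;
 *
 *	if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 *		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
 *
 *	addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 */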
void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}
#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)
/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */