/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
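
/*
 * Illustrative sketch, not taken from the kernel sources: with the generic
 * mode's KASAN_SHADOW_SCALE_SHIFT of 3, one shadow byte covers an 8-byte
 * granule, so every address inside a granule maps to the same shadow byte:
 *
 *	char *ptr = ...;
 *	void *shadow      = kasan_mem_to_shadow(ptr);
 *	void *shadow_last = kasan_mem_to_shadow(ptr + 7);
 *	// shadow == shadow_last for an 8-byte-aligned ptr
 *
 * The scale shift and KASAN_SHADOW_OFFSET are mode- and arch-specific.
 */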

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
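
/*
 * Illustrative sketch, an assumption rather than kernel code: a path that
 * must legitimately touch memory KASAN considers poisoned brackets the
 * access, so reports are suppressed only for the current task:
 *
 *	kasan_disable_current();
 *	... read or write the otherwise-reported memory ...
 *	kasan_enable_current();
 *
 * In the generic mode these calls nest by way of a per-task depth counter.
 */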

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					    unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
#ifdef CONFIG_KASAN_GENERIC
	int alloc_meta_offset;
	int free_meta_offset;
#endif
	bool is_kmalloc;
};

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
							size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
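
/*
 * Illustrative sketch, an assumption rather than kernel code: a helper that
 * wants to probe an object before using it, and have KASAN emit a report if
 * the byte is not accessible, bails out when the check fails:
 *
 *	if (!kasan_check_byte(objp))
 *		return 0;	// objp points to freed or otherwise poisoned memory
 *
 * This is the kind of explicit check needed by size-query helpers that
 * cannot rely on compiler instrumentation alone.
 */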

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					     void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

size_t kasan_metadata_size(struct kmem_cache *cache);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);
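
/*
 * Illustrative sketch, an assumption rather than kernel code: the outline
 * checks in the KASAN runtime report a failed access roughly like this,
 * where shadow_check() stands in for the mode-specific validity test:
 *
 *	if (unlikely(!shadow_check(addr, size)))
 *		return !kasan_report(addr, size, is_write, ret_ip);
 *
 * The boolean result tells the instrumented caller whether the access
 * turned out to be valid.
 */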

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);
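
/*
 * Illustrative sketch, an assumption rather than kernel code: an
 * architecture's module_alloc() would pair these calls roughly as below,
 * with the shadow released again from the vfree path:
 *
 *	p = __vmalloc_node_range(size, MODULE_ALIGN, ...);
 *	if (p && kasan_alloc_module_shadow(p, size, gfp_mask) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 */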

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */