// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include "kasan.h"
#include "../slab.h"
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
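/*
 * Capture the current stack trace, drop everything past the first irq
 * entry frame (those frames belong to the interrupted task and are not
 * useful for the report), and deduplicate the result via the stack
 * depot, which returns a compact handle to store in alloc/free tracks.
 */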
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}
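/*
 * current->kasan_depth is a per-task counter consulted by the report
 * code: reports are only produced while it is zero. Callers pair
 * kasan_disable_current() with kasan_enable_current() around code that
 * legitimately touches poisoned memory, e.g. when dumping an object's
 * contents for debugging.
 */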
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);
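/*
 * The definitions below override the architecture's memset/memmove/memcpy
 * so that accesses made through the string ops are checked as well;
 * __memset/__memmove/__memcpy are the unchecked implementations.
 */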
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
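/*
 * For the trailing, partially covered shadow granule the last shadow
 * byte encodes how much of the granule is accessible: generic KASAN
 * stores the number of accessible bytes, while software tag-based KASAN
 * stores the pointer tag there.
 */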
void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}
/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}
/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}
void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
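/*
 * kasan_cache_create() below reserves space for the alloc metadata (and,
 * when the freed object itself can't hold it, the free metadata) behind
 * each object and then pads the object with a redzone. For example, a
 * 96-byte object satisfies "object_size <= 128 - 32" above, so it gets a
 * 32-byte redzone.
 */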
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}
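/*
 * KASAN metadata lives inside the object at the offsets recorded in
 * cache->kasan_info by kasan_cache_create(); the helpers below just do
 * the pointer arithmetic.
 */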
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}
static void kasan_set_free_info(struct kmem_cache *cache,
		void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
	alloc_meta->free_pointer_tag[idx] = tag;
	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

	set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}
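/*
 * On slab page allocation, reset the per-page KASAN tags and poison the
 * whole page as a redzone; individual objects are unpoisoned later, as
 * the allocator hands them out.
 */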
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < (1 << compound_order(page)); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page), page_size(page),
			KASAN_KMALLOC_REDZONE);
}
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on object indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}
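/*
 * shadow_invalid() below decides whether a free is plausible from the
 * shadow byte alone: for generic KASAN anything outside
 * 0..KASAN_SHADOW_SCALE_SIZE-1 means the object is already poisoned
 * (e.g. a double-free); for tag-based KASAN the memory tag must match
 * the pointer tag unless the pointer carries the match-all kernel tag.
 */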
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

	/* else CONFIG_KASAN_SW_TAGS: */
	if ((u8)shadow_byte == KASAN_TAG_INVALID)
		return true;
	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
		return true;

	return false;
}
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}
bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}
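/*
 * Common allocation hook: unpoison the first 'size' bytes of the object
 * and poison the rest of it, up to the rounded-up object size, as a
 * redzone. This catches accesses beyond the requested size even when
 * they still land within the object's footprint.
 */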
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
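/*
 * Map shadow for a module region: each shadow byte covers
 * KASAN_SHADOW_SCALE_SIZE bytes of the module area, so the allocation
 * is size >> KASAN_SHADOW_SCALE_SHIFT, rounded up to whole pages.
 */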
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
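/*
 * kasan_report() below can fire inside a user-access region (e.g. on x86
 * with SMAP temporarily disabled by STAC); the user_access_save() and
 * user_access_restore() pair preserves that state across the report.
 */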
extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);

void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	unsigned long flags = user_access_save();

	__kasan_report(addr, size, is_write, ip);
	user_access_restore(flags);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen if we are onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result of find_vm_area() tells us that it was
		 * the second case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}
static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif