// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

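/*
 * Capture the current stack trace and deduplicate it via the stack depot,
 * so that only a small handle needs to be stored per allocation.
 */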
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

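/* Record the current task and stack trace in an alloc/free track. */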
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

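/*
 * current->kasan_depth is consulted by the reporting path to decide
 * whether reports may be generated for the current task; these helpers
 * adjust it around regions where reporting must be toggled.
 */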
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

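/* Mark an arbitrary memory range as accessible in the shadow memory. */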
void kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	unpoison_range(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}

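/*
 * Tag (in software tag-based mode) and unpoison a newly allocated,
 * non-highmem page range. All sub-pages of a high-order allocation
 * receive the same tag, so any pointer derived from the allocation
 * remains valid across the whole range.
 */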
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

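/*
 * Lay out the in-object KASAN metadata and grow the cache's object size
 * accordingly. The resulting layout is roughly:
 *
 *   | object | alloc meta | free meta (generic only, optional) | redzone |
 *
 * If the metadata doesn't fit within KMALLOC_MAX_SIZE, KASAN is left
 * disabled for this cache (SLAB_KASAN is not set).
 */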
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

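/* Return the size of the KASAN metadata appended to each object. */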
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

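/*
 * Reset the page tags and poison an entire slab page; objects within it
 * are unpoisoned individually as they are handed out to users.
 */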
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

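/*
 * Initialize the per-object metadata and, in software tag-based mode,
 * preassign a tag to the object. Called for each object when a slab is
 * created.
 */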
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

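/*
 * Check whether the shadow byte of an object being freed indicates an
 * invalid free: a freed or out-of-bounds object for generic KASAN, or a
 * pointer/memory tag mismatch for software tag-based KASAN.
 */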
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_GRANULE_SIZE;

	/* else CONFIG_KASAN_SW_TAGS: */
	if ((u8)shadow_byte == KASAN_TAG_INVALID)
		return true;
	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
		return true;

	return false;
}

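/*
 * Worker for the free hooks: report an invalid free if the pointer is not
 * the start of an object or if the shadow/tag state says the object is not
 * currently allocated; otherwise poison the object. Returns true if the
 * slab allocator must not reuse the object yet (generic KASAN keeps it in
 * the quarantine instead).
 */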
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

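/*
 * Common allocation hook: unpoison the object for the requested size,
 * poison the rest of the object as a redzone, assign a tag (in software
 * tag-based mode), and record the allocation stack trace.
 */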
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

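/*
 * Allocation hook for page-allocator-backed (large) kmalloc objects:
 * unpoison the requested size and poison the remainder of the page(s)
 * as a redzone.
 */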
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

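/*
 * krealloc hook: dispatch to the large-object or slab-object allocation
 * hook depending on how the underlying memory was allocated.
 */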
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

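/*
 * Poison the memory of an object being freed through kfree(), covering
 * both slab objects and page-allocator-backed allocations.
 */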
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

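/*
 * Free hook for large (page-allocator-backed) kmalloc objects: only a
 * sanity check on the pointer is needed here, since poisoning happens
 * when the pages themselves are freed.
 */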
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}