// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
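
/*
 * kasan_save_stack() captures the current stack trace and stores it in the
 * stack depot, which deduplicates traces and hands back a compact handle.
 * That handle, together with the pid, is what gets recorded in the
 * per-object allocation and free tracks.
 */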
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
void __kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}
#if CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	unpoison_range(base, THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}
#endif /* CONFIG_KASAN_STACK */
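
/*
 * Give every page of the allocation the same random tag and unpoison the
 * whole range. The page tag helpers are no-ops without a tag-based KASAN
 * mode, so under generic KASAN only the unpoisoning has an effect.
 */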
void __kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}
void __kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
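
/*
 * For example, under generic KASAN a 32-byte object falls into the first
 * bucket above and gets a 16-byte redzone, while a 100-byte object no
 * longer fits the 128 - 32 bucket and gets a 64-byte redzone.
 */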
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	if (!kasan_stack_collection_enabled()) {
		*flags |= SLAB_KASAN;
		return;
	}

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
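
/* Size of the per-object KASAN metadata reserved by __kasan_cache_create(). */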
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
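
/*
 * Poison the whole slab page and reset its page tags; individual objects
 * get unpoisoned later, as the slab allocator hands them out.
 */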
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}
void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		if (!(cache->flags & SLAB_KASAN))
			return (void *)object;

		alloc_meta = kasan_get_alloc_meta(cache, object);
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		object = set_tag(object, assign_tag(cache, object, true, false));

	return (void *)object;
}
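
/*
 * Returns true if the object must not be returned to the allocator right
 * away: either an invalid free was detected and reported, or (for generic
 * KASAN) the object has been placed into the quarantine.
 */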
static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!kasan_stack_collection_enabled())
		return false;

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(cache, object);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return ____kasan_slab_free(cache, object, ip, true);
}
void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}
static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
}
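
/*
 * Common allocation hook: unpoison the first @size bytes of the object,
 * poison the rest of it as a redzone, assign or keep a tag depending on
 * @keep_tag, and record the allocation stack when metadata is available.
 */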
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (kasan_stack_collection_enabled() && (cache->flags & SLAB_KASAN))
		set_alloc_info(cache, (void *)object, flags);

	return set_tag(object, tag);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
}
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(__kasan_kmalloc);
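
/*
 * Large kmalloc allocations are backed by the page allocator rather than a
 * slab cache: unpoison the requested size and poison the remainder of the
 * allocated page(s) as a redzone.
 */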
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}