// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>
#include "../mm/kasan/kasan.h"
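
/*
 * Note: generic KASAN tracks object sizes with byte precision, while the
 * tag-based modes only work at KASAN_GRANULE_SIZE granularity. Tests that
 * must trigger a report in every mode therefore shift their bad accesses by
 * OOB_TAG_OFF, defined below, so that they land outside the object's last
 * granule.
 */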
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	fail_data.report_found = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_data", &fail_data);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !fail_data.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (!READ_ONCE(fail_data.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
	WRITE_ONCE(fail_data.report_found, false);			\
} while (0)
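
/*
 * Typical usage, as in the tests below:
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 *	kfree(ptr);
 */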

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)
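
/*
 * For example, KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC) marks
 * a test as skipped when running under one of the tag-based modes.
 */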

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
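	/*
	 * Worked example, assuming KASAN_GRANULE_SIZE == 16: size == 107, so
	 * ptr[size] is an in-granule overflow (only byte-precise generic
	 * KASAN flags it), ptr[size + 5] == ptr[112] is the first granule
	 * past the requested size, and ptr[size + 16 + 5] == ptr[128] lies
	 * past the 128-byte kmalloc object itself.
	 */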
	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));

	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);

	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
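/*
 * For example, with SLUB's typical KMALLOC_MAX_CACHE_SIZE of two pages, the
 * KMALLOC_MAX_CACHE_SIZE + 10 bytes requested below cannot be served by any
 * kmalloc cache, so the request falls through to the page allocator.
 */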
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 1;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}
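
/*
 * The size pairs used below (201/235, and the same pair shifted past
 * KMALLOC_MAX_CACHE_SIZE) are deliberately unaligned: with a 16-byte granule,
 * size1, middle, and size2 each land in a different KASAN granule.
 */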

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);

	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);

	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only
 * check the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));

	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));

	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));

	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));

	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));

	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
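	/*
	 * The negative size wraps to a huge positive value (here
	 * (size_t)-2), so the instrumented memmove() sees an access range
	 * that runs far past the 64-byte allocation.
	 */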
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));

	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
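
/*
 * The two tests below check that a pointer recovered via its struct page or
 * its physical address is still accepted by kfree() without producing a
 * false-positive KASAN report.
 */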

static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * points to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	OPTIMIZER_HIDE_VAR(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));

	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip metadata that could be stored in the freed object, so ptr
	 * will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
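
/*
 * The two helpers below expect nr/addr to denote an out-of-bounds location:
 * every instrumented bitop on that bit must then produce a KASAN report.
 */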

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, the accesses are still out-of-bounds, since bitops are
	 * defined to operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * The calls below try to access a bit beyond the allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
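/*
 * Note: tags are 8 bit wide in both tag-based modes; KASAN_TAG_MIN is 0x00
 * and KASAN_TAG_KERNEL, the match-all tag, is 0xff.
 */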
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff.*/
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");