// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan_test: " fmt

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

#include <asm/page.h>

#include "kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

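/*
 * Generic KASAN tracks object bounds with byte granularity, so an access one
 * byte past an object is detectable at offset 0. The tag-based modes compare
 * tags per KASAN_GRANULE_SIZE-sized granule, so the out-of-bounds accesses
 * below are pushed a full granule further to reliably land in a granule whose
 * tag does not match the pointer.
 */
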
static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;
	bool async_fault;
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	if (strnstr(buf, "BUG: KASAN: ", len))
		WRITE_ONCE(test_status.report_found, true);
	else if (strnstr(buf, "Asynchronous fault: ", len))
		WRITE_ONCE(test_status.async_fault, true);
}

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return 0;
}

static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_status". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    !READ_ONCE(test_status.async_fault))		\
			kasan_enable_tagging();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
	WRITE_ONCE(test_status.async_fault, false);			\
} while (0)

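/*
 * A minimal usage sketch (ptr here stands for any hypothetical allocation
 * that has already been freed):
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 *
 * Exactly one report is expected from the expression; the status fields are
 * reset afterwards so that subsequent checks start from a clean state.
 */
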
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

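/* Check that KASAN detects a use-after-free read of a kmalloc object. */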
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}

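/*
 * Check that kfree() accepts a pointer recomputed via virt_to_page() and
 * page_address() without producing a false-positive invalid-free report.
 */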
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

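/*
 * Same as kfree_via_page(), but the pointer is recomputed from the physical
 * address of the allocation.
 */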
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

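/*
 * Smoke-test a SLAB_ACCOUNT cache under KASAN: allocate and free with delays
 * so the memcg accounting paths get exercised; no report is expected here.
 */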
static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	((volatile char *)ptr)[0] = 'x';
	((volatile char *)ptr)[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

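/*
 * The two helpers below drive the atomic, non-atomic, and lock-flavored bitop
 * variants at an out-of-bounds bit, so that every instrumented entry point is
 * covered by the callers further down.
 */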
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

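/* Check that KASAN detects a double-free via kfree_sensitive(). */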
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

/*
 * The two tests below check that Generic KASAN prints auxiliary stack traces
 * for RCU callbacks and workqueues. The reports need to be inspected manually.
 *
 * These tests are still enabled for other KASAN modes to make sure that all
 * modes report bad accesses in tested scenarios.
 */

static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	((volatile struct kasan_rcu_info *)fp)->i;
}

static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}

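/*
 * The work handler frees the work item itself, making the later access in
 * workqueue_uaf() a use-after-free by construction.
 */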
static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}

static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return;

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff.*/
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");