// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"
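/*
 * Editor's note: generic KASAN tracks memory state with byte precision,
 * while the tag-based mode only distinguishes KASAN_GRANULE_SIZE (16-byte)
 * granules. The offset below shifts out-of-bounds accesses by one granule
 * in tag-based mode, so they land in memory guaranteed to carry a
 * different tag.
 */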
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;
static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;
/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled.
 */
static int kasan_test_init(struct kunit *test)
{
	multishot = kasan_save_enable_multi_shot();
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
}
/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	fail_data.report_expected = true;			\
	fail_data.report_found = false;				\
	kunit_add_named_resource(test,				\
				 NULL,				\
				 NULL,				\
				 &resource,			\
				 "kasan_data", &fail_data);	\
	expression;						\
	KUNIT_EXPECT_EQ(test,					\
			fail_data.report_expected,		\
			fail_data.report_found);		\
} while (0)
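/*
 * Editor's note on the mechanism (an assumption about the reporting path,
 * not spelled out here): when a report fires, KASAN is expected to look up
 * the "kasan_data" resource of the current KUnit test and set
 * fail_data.report_found instead of panicking; KUNIT_EXPECT_EQ() above then
 * compares it against report_expected. Typical use:
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 */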
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	/*
	 * Allocate a chunk that does not fit into a SLUB cache to trigger
	 * the page allocator fallback.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}
static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
static void kmalloc_oob_krealloc_more(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}
static void kmalloc_oob_krealloc_less(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 15;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}
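/*
 * Editor's note: the two tests below copy a 16-byte struct so the compiler
 * emits a single wide access, exercising KASAN's 16-byte access checks;
 * kmalloc_oob_16 additionally depends on generic-mode instrumentation (see
 * the config guard inside it).
 */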
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}
static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}
static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}
static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}
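/*
 * Editor's note: invalid_size below is -2 converted to size_t, i.e. a huge
 * length; the instrumented memmove() should detect the resulting
 * out-of-bounds range up front. The volatile qualifier keeps the compiler
 * from constant-folding the bad size away.
 */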
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
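/*
 * Editor's note: kmalloc_uaf2 also asserts ptr1 != ptr2; if the second
 * allocation reused the freed object's address, the write through ptr1
 * would be in-bounds again and no report could be expected.
 */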
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
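/*
 * Editor's note: the next two tests free a pointer recomputed via its
 * struct page and via its physical address. Both frees are valid, so they
 * must complete without a KASAN report.
 */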
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache = kmem_cache_create("test_cache",
						     size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
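/*
 * Editor's note: the test below performs no deliberately bad access; it
 * only checks that SLAB_ACCOUNT (memcg-charged) cache allocations complete
 * without KASAN false positives.
 */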
static void memcg_accounted_kmem_cache(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
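/*
 * Editor's note: ksize() reports the usable size of the whole allocated
 * object and unpoisons it, so accesses between the requested size and
 * ksize(ptr) must pass, while the first byte past ksize(ptr) must fail.
 */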
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	volatile int i = OOB_TAG_OFF;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger an invalid free; the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
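/*
 * Editor's note: in the two mem* tests below, the size is first rounded up
 * to a whole granule under the tag-based mode, so that the size + 1 access
 * is guaranteed to cross into a differently-tagged granule.
 */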
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));

	kfree(ptr);
}
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip the metadata that could be stored in a freed object so ptr
	 * will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
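/*
 * Editor's note: callers of the two helpers below pass a bit number just
 * past the allocation, so every bitop issued here touches memory outside
 * the object and must be reported.
 */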
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, they are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * The calls below try to access a bit beyond the allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* Allocation size will be rounded up to the granule size, which is 16. */
	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 16 allocated bytes. */
	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);

	kfree(bits);
}
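/*
 * Editor's note: kfree_sensitive() zeroes the object before freeing it;
 * calling it twice on the same pointer is a double-free that KASAN must
 * report.
 */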
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}
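/*
 * Editor's note: KUnit runs each case below through kasan_test_init() and
 * kasan_test_exit(), so multi-shot reporting stays enabled across the
 * whole suite.
 */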
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_oob_krealloc_more),
	KUNIT_CASE(kmalloc_oob_krealloc_less),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(memcg_accounted_kmem_cache),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	{},
};
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");