Merge tag 'for-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/pateldipen19...
[platform/kernel/linux-rpi.git] / mm / kmsan / kmsan_test.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Test cases for KMSAN.
4  * For each test case checks the presence (or absence) of generated reports.
5  * Relies on 'console' tracepoint to capture reports as they appear in the
6  * kernel log.
7  *
8  * Copyright (C) 2021-2022, Google LLC.
9  * Author: Alexander Potapenko <glider@google.com>
10  *
11  */
12
13 #include <kunit/test.h>
14 #include "kmsan.h"
15
16 #include <linux/jiffies.h>
17 #include <linux/kernel.h>
18 #include <linux/kmsan.h>
19 #include <linux/mm.h>
20 #include <linux/random.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/string.h>
24 #include <linux/tracepoint.h>
25 #include <linux/vmalloc.h>
26 #include <trace/events/printk.h>
27
/* Per-CPU variable written/read by test_percpu_propagate(). */
static DEFINE_PER_CPU(int, per_cpu_var);
29
/*
 * Report as observed from console. Filled in by probe_console(), consumed by
 * report_available()/report_matches(), and reset in test_init() before each
 * test case.
 */
static struct {
	spinlock_t lock;
	bool available; /* A KMSAN report header has been captured. */
	bool ignore; /* Stop console output collection. */
	char header[256]; /* First line of the captured KMSAN report. */
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
39
/*
 * Probe for console output: obtains observed lines of interest.
 *
 * Attached to the 'console' tracepoint by kmsan_suite_init(). Captures the
 * header line of the first KMSAN report seen, then sets observed.ignore so
 * collection stops until test_init() re-arms it for the next test case.
 */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;

	/*
	 * Unlocked fast-path check; observed.ignore is only set while holding
	 * observed.lock below, so a stale read merely costs one extra pass.
	 */
	if (observed.ignore)
		return;
	spin_lock_irqsave(&observed.lock, flags);

	if (strnstr(buf, "BUG: KMSAN: ", len)) {
		/*
		 * KMSAN report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.header, buf,
			min(len + 1, sizeof(observed.header)));
		WRITE_ONCE(observed.available, true);
		observed.ignore = true;
	}
	spin_unlock_irqrestore(&observed.lock, flags);
}
63
64 /* Check if a report related to the test exists. */
65 static bool report_available(void)
66 {
67         return READ_ONCE(observed.available);
68 }
69
/* Information we expect in a report. */
struct expect_report {
	const char *error_type; /* Error type, e.g. "uninit-value". */
	/*
	 * Kernel symbol from the error header, or NULL if no report is
	 * expected.
	 */
	const char *symbol;
};
79
/*
 * Check observed report matches information in @r.
 *
 * Builds the expected header "BUG: KMSAN: <error_type> in <symbol>", strips
 * the unpredictable "+0x..." offset (and trailing module name), and substring-
 * matches it against the captured console line. When @r->symbol is NULL no
 * report is expected, so the match succeeds iff nothing was observed.
 */
static bool report_matches(const struct expect_report *r)
{
	typeof(observed.header) expected_header;
	unsigned long flags;
	bool ret = false;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available() || !r->symbol)
		return (!report_available() && !r->symbol);

	/* Generate expected report contents. */

	/* Title */
	cur = expected_header;
	end = &expected_header[sizeof(expected_header) - 1];

	cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);

	scnprintf(cur, end - cur, " in %s", r->symbol);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expected_header, '+');
	if (cur)
		*cur = '\0';

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.header, expected_header);
out:
	spin_unlock_irqrestore(&observed.lock, flags);

	return ret;
}
118
119 /* ===== Test cases ===== */
120
/*
 * Prevent replacing branch with select in LLVM.
 * The noinline call with a side effect forces USE() to keep a real branch,
 * which is what makes KMSAN check the condition's shadow value.
 */
static noinline void check_true(char *arg)
{
	pr_info("%s is true\n", arg);
}
126
/* Counterpart of check_true() for the else-branch of USE(). */
static noinline void check_false(char *arg)
{
	pr_info("%s is false\n", arg);
}
131
/*
 * Evaluate @x in a conditional so KMSAN checks its shadow value. The two
 * branches call distinct noinline functions with side effects, preventing the
 * compiler from folding the branch into a select.
 */
#define USE(x)                           \
	do {                             \
		if (x)                   \
			check_true(#x);  \
		else                     \
			check_false(#x); \
	} while (0)
139
/* Define an expect_report @e with the given error type and symbol. */
#define EXPECTATION_ETYPE_FN(e, reason, fn) \
	struct expect_report e = {          \
		.error_type = reason,       \
		.symbol = fn,               \
	}

/* No report should be generated by the test. */
#define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL)
/* An "uninit-value" report in function @fn is expected. */
#define EXPECTATION_UNINIT_VALUE_FN(e, fn) \
	EXPECTATION_ETYPE_FN(e, "uninit-value", fn)
/* An "uninit-value" report in the current function is expected. */
#define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__)
/* A "use-after-free" report in the current function is expected. */
#define EXPECTATION_USE_AFTER_FREE(e) \
	EXPECTATION_ETYPE_FN(e, "use-after-free", __func__)
152
/* Test case: ensure that kmalloc() returns uninitialized memory. */
static void test_uninit_kmalloc(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	int *ptr;

	kunit_info(test, "uninitialized kmalloc test (UMR report)\n");
	ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
	/* Deliberate read of uninitialized heap memory; KMSAN must report. */
	USE(*ptr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
164
/*
 * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
 */
static void test_init_kmalloc(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	int *ptr;

	kunit_info(test, "initialized kmalloc test (no reports)\n");
	ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
	/* memset() must mark the whole object as initialized. */
	memset(ptr, 0, sizeof(*ptr));
	USE(*ptr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
179
/* Test case: ensure that kzalloc() returns initialized memory. */
static void test_init_kzalloc(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	int *ptr;

	kunit_info(test, "initialized kzalloc test (no reports)\n");
	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
	USE(*ptr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
191
/* Test case: ensure that local variables are uninitialized by default. */
static void test_uninit_stack_var(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	/* volatile keeps the uninitialized read from being optimized away. */
	volatile int cond;

	kunit_info(test, "uninitialized stack variable (UMR report)\n");
	USE(cond);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
202
/* Test case: ensure that local variables with initializers are initialized. */
static void test_init_stack_var(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	volatile int cond = 1;

	kunit_info(test, "initialized stack variable (no reports)\n");
	USE(cond);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
213
/* Consume both parameters; noinline so shadow is passed across the call. */
static noinline void two_param_fn_2(int arg1, int arg2)
{
	USE(arg1);
	USE(arg2);
}
219
/* Pass an (initialized) parameter one call level deeper. */
static noinline void one_param_fn(int arg)
{
	two_param_fn_2(arg, arg);
	USE(arg);
}
225
/*
 * Called with one uninitialized argument by test_params(); also exercises an
 * initialized value through one_param_fn() to mix clean and dirty shadow.
 */
static noinline void two_param_fn(int arg1, int arg2)
{
	int init = 0;

	one_param_fn(init);
	USE(arg1);
	USE(arg2);
}
234
/*
 * Test case: ensure that an uninitialized value passed as a function
 * parameter is reported, either eagerly at the call site or inside the
 * callee, depending on CONFIG_KMSAN_CHECK_PARAM_RETVAL.
 */
static void test_params(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
	/*
	 * With eager param/retval checking enabled, KMSAN will report an error
	 * before the call to two_param_fn().
	 */
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_params");
#else
	EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn");
#endif
	volatile int uninit, init = 1;

	kunit_info(test,
		   "uninit passed through a function parameter (UMR report)\n");
	two_param_fn(uninit, init);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
253
/* Sum three values so uninitializedness propagates into the result. */
static int signed_sum3(int a, int b, int c)
{
	return a + b + c;
}
258
/*
 * Test case: ensure that uninitialized values are tracked through function
 * arguments.
 */
static void test_uninit_multiple_params(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	/* b is initialized; a and c are deliberately left uninitialized. */
	volatile char b = 3, c;
	volatile int a;

	kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
	USE(signed_sum3(a, b, c));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
273
/*
 * Helper function to make an array uninitialized.
 * Copies an uninitialized volatile local into array[start..stop), so that
 * range carries uninitialized shadow afterwards.
 */
static noinline void do_uninit_local_array(char *array, int start, int stop)
{
	volatile char uninit;

	for (int i = start; i < stop; i++)
		array[i] = uninit;
}
282
/*
 * Test case: ensure kmsan_check_memory() reports an error when checking
 * uninitialized memory.
 */
static void test_uninit_kmsan_check_memory(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory");
	volatile char local_array[8];

	kunit_info(
		test,
		"kmsan_check_memory() called on uninit local (UMR report)\n");
	/* Poison bytes [5, 7) of the array. */
	do_uninit_local_array((char *)local_array, 5, 7);

	/* Checking the full 8 bytes must hit the poisoned range. */
	kmsan_check_memory((char *)local_array, 8);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
300
/*
 * Test case: check that a virtual memory range created with vmap() from
 * initialized pages is still considered as initialized.
 */
static void test_init_kmsan_vmap_vunmap(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	const int npages = 2;
	struct page **pages;
	void *vbuf;

	kunit_info(test, "pages initialized via vmap (no reports)\n");

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	for (int i = 0; i < npages; i++)
		pages[i] = alloc_page(GFP_KERNEL);
	vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	/* Initialize through the vmap alias ... */
	memset(vbuf, 0xfe, npages * PAGE_SIZE);
	/* ... then verify the underlying pages are seen as initialized. */
	for (int i = 0; i < npages; i++)
		kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);

	if (vbuf)
		vunmap(vbuf);
	for (int i = 0; i < npages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
331
/*
 * Test case: ensure that memset() can initialize a buffer allocated via
 * vmalloc().
 */
static void test_init_vmalloc(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	int npages = 8;
	char *buf;

	kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
	buf = vmalloc(PAGE_SIZE * npages);
	buf[0] = 1;
	memset(buf, 0xfe, PAGE_SIZE * npages);
	USE(buf[0]);
	/* Every page of the buffer must now be fully initialized. */
	for (int i = 0; i < npages; i++)
		kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
	vfree(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
352
/* Test case: ensure that use-after-free reporting works. */
static void test_uaf(struct kunit *test)
{
	EXPECTATION_USE_AFTER_FREE(expect);
	volatile int value;
	volatile int *var;

	kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
	var = kmalloc(80, GFP_KERNEL);
	var[3] = 0xfeedface;
	kfree((int *)var);
	/* Copy the invalid value before checking it. */
	value = var[3];
	USE(value);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
369
/*
 * Test case: ensure that uninitialized values are propagated through per-CPU
 * memory.
 */
static void test_percpu_propagate(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	volatile int uninit, check;

	kunit_info(test,
		   "uninit local stored to per_cpu memory (UMR report)\n");

	/* Round-trip the uninit value through per-CPU storage. */
	this_cpu_write(per_cpu_var, uninit);
	check = this_cpu_read(per_cpu_var);
	USE(check);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
387
/*
 * Test case: ensure that passing uninitialized values to printk() leads to an
 * error report.
 */
static void test_printk(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
	/*
	 * With eager param/retval checking enabled, KMSAN will report an error
	 * before the call to pr_info().
	 */
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk");
#else
	/* Otherwise the report fires inside vsnprintf()'s number(). */
	EXPECTATION_UNINIT_VALUE_FN(expect, "number");
#endif
	volatile int uninit;

	kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
	pr_info("%px contains %d\n", &uninit, uninit);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
409
/*
 * Prevent the compiler from optimizing @var away. Without this, Clang may
 * notice that @var is uninitialized and drop memcpy() calls that use it.
 *
 * There is OPTIMIZER_HIDE_VAR() in linux/compiler.h that we cannot use here,
 * because it is implemented as inline assembly receiving @var as a parameter
 * and will enforce a KMSAN check. Same is true for e.g. barrier_data(var).
 */
#define DO_NOT_OPTIMIZE(var) barrier()
419
/*
 * Test case: ensure that memcpy() correctly copies initialized values.
 * Also serves as a regression test to ensure DO_NOT_OPTIMIZE() does not cause
 * extra checks.
 */
static void test_init_memcpy(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	volatile int src;
	volatile int dst = 0;

	/* Keep @src alive even though it starts uninitialized. */
	DO_NOT_OPTIMIZE(src);
	src = 1;
	kunit_info(
		test,
		"memcpy()ing aligned initialized src to aligned dst (no reports)\n");
	memcpy((void *)&dst, (void *)&src, sizeof(src));
	kmsan_check_memory((void *)&dst, sizeof(dst));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
440
/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and `dst`.
 */
static void test_memcpy_aligned_to_aligned(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
	volatile int uninit_src;
	volatile int dst = 0;

	kunit_info(
		test,
		"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
	DO_NOT_OPTIMIZE(uninit_src);
	memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
	/* The uninitialized shadow must have traveled with the copy. */
	kmsan_check_memory((void *)&dst, sizeof(dst));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
459
/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and unaligned `dst`.
 *
 * Copying aligned 4-byte value to an unaligned one leads to touching two
 * aligned 4-byte values. This test case checks that KMSAN correctly reports an
 * error on the first of the two values.
 */
static void test_memcpy_aligned_to_unaligned(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
	volatile int uninit_src;
	volatile char dst[8] = { 0 };

	kunit_info(
		test,
		"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
	DO_NOT_OPTIMIZE(uninit_src);
	/* dst[1..4] become uninitialized, straddling the first 4-byte word. */
	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
	kmsan_check_memory((void *)dst, 4);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
482
/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and unaligned `dst`.
 *
 * Copying aligned 4-byte value to an unaligned one leads to touching two
 * aligned 4-byte values. This test case checks that KMSAN correctly reports an
 * error on the second of the two values.
 */
static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect,
				    "test_memcpy_aligned_to_unaligned2");
	volatile int uninit_src;
	volatile char dst[8] = { 0 };

	kunit_info(
		test,
		"memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
	DO_NOT_OPTIMIZE(uninit_src);
	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
	/* Check the second aligned word, which holds the spilled-over byte. */
	kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
506
/*
 * Generate test cases for memset16(), memset32(), memset64().
 * Each test overwrites an uninitialized value with memset##size() and expects
 * no report when the result is checked.
 */
#define DEFINE_TEST_MEMSETXX(size)                                          \
	static void test_memset##size(struct kunit *test)                   \
	{                                                                   \
		EXPECTATION_NO_REPORT(expect);                              \
		volatile uint##size##_t uninit;                             \
                                                                            \
		kunit_info(test,                                            \
			   "memset" #size "() should initialize memory\n"); \
		DO_NOT_OPTIMIZE(uninit);                                    \
		memset##size((uint##size##_t *)&uninit, 0, 1);              \
		kmsan_check_memory((void *)&uninit, sizeof(uninit));        \
		KUNIT_EXPECT_TRUE(test, report_matches(&expect));           \
	}

DEFINE_TEST_MEMSETXX(16)
DEFINE_TEST_MEMSETXX(32)
DEFINE_TEST_MEMSETXX(64)
525
/*
 * Recursively fill array[start..size) with Fibonacci-style sums, creating a
 * new origin record at each step; recursion depth is bounded by @size.
 */
static noinline void fibonacci(int *array, int size, int start)
{
	if (start < 2 || (start == size))
		return;
	array[start] = array[start - 1] + array[start - 2];
	fibonacci(array, size, start + 1);
}
533
/*
 * Test case: ensure that an origin chain longer than KMSAN_MAX_ORIGIN_DEPTH
 * still produces a report for the propagated uninitialized value.
 */
static void test_long_origin_chain(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_long_origin_chain");
	/* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */
	volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2];
	int last = ARRAY_SIZE(accum) - 1;

	kunit_info(
		test,
		"origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n");
	/*
	 * We do not set accum[1] to 0, so the uninitializedness will be carried
	 * over to accum[2..last].
	 */
	accum[0] = 1;
	fibonacci((int *)accum, ARRAY_SIZE(accum), 2);
	kmsan_check_memory((void *)&accum[last], sizeof(int));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
553
/*
 * Test case: ensure that saving/restoring/printing stacks to/from stackdepot
 * does not trigger errors.
 *
 * KMSAN uses stackdepot to store origin stack traces, that's why we do not
 * instrument lib/stackdepot.c. Yet it must properly mark its outputs as
 * initialized because other kernel features (e.g. netdev tracker) may also
 * access stackdepot from instrumented code.
 */
static void test_stackdepot_roundtrip(struct kunit *test)
{
	unsigned long src_entries[16], *dst_entries;
	unsigned int src_nentries, dst_nentries;
	EXPECTATION_NO_REPORT(expect);
	depot_stack_handle_t handle;

	kunit_info(test, "testing stackdepot roundtrip (no reports)\n");

	/* Save the current stack, then fetch it back through stackdepot. */
	src_nentries =
		stack_trace_save(src_entries, ARRAY_SIZE(src_entries), 1);
	handle = stack_depot_save(src_entries, src_nentries, GFP_KERNEL);
	stack_depot_print(handle);
	dst_nentries = stack_depot_fetch(handle, &dst_entries);
	KUNIT_EXPECT_TRUE(test, src_nentries == dst_nentries);

	/* The fetched entries must be fully marked as initialized. */
	kmsan_check_memory((void *)dst_entries,
			   sizeof(*dst_entries) * dst_nentries);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
583
/* All KMSAN test cases, run in order by KUnit. */
static struct kunit_case kmsan_test_cases[] = {
	KUNIT_CASE(test_uninit_kmalloc),
	KUNIT_CASE(test_init_kmalloc),
	KUNIT_CASE(test_init_kzalloc),
	KUNIT_CASE(test_uninit_stack_var),
	KUNIT_CASE(test_init_stack_var),
	KUNIT_CASE(test_params),
	KUNIT_CASE(test_uninit_multiple_params),
	KUNIT_CASE(test_uninit_kmsan_check_memory),
	KUNIT_CASE(test_init_kmsan_vmap_vunmap),
	KUNIT_CASE(test_init_vmalloc),
	KUNIT_CASE(test_uaf),
	KUNIT_CASE(test_percpu_propagate),
	KUNIT_CASE(test_printk),
	KUNIT_CASE(test_init_memcpy),
	KUNIT_CASE(test_memcpy_aligned_to_aligned),
	KUNIT_CASE(test_memcpy_aligned_to_unaligned),
	KUNIT_CASE(test_memcpy_aligned_to_unaligned2),
	KUNIT_CASE(test_memset16),
	KUNIT_CASE(test_memset32),
	KUNIT_CASE(test_memset64),
	KUNIT_CASE(test_long_origin_chain),
	KUNIT_CASE(test_stackdepot_roundtrip),
	{},
};
609
610 /* ===== End test cases ===== */
611
612 static int test_init(struct kunit *test)
613 {
614         unsigned long flags;
615
616         spin_lock_irqsave(&observed.lock, flags);
617         observed.header[0] = '\0';
618         observed.ignore = false;
619         observed.available = false;
620         spin_unlock_irqrestore(&observed.lock, flags);
621
622         return 0;
623 }
624
/* Per-test teardown: nothing to clean up, but KUnit requires the hook. */
static void test_exit(struct kunit *test)
{
}
628
/* Suite setup: attach probe_console() to the 'console' tracepoint. */
static int kmsan_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	return 0;
}
634
/*
 * Suite teardown: detach the console probe and wait for in-flight probe
 * invocations to finish before the module text can go away.
 */
static void kmsan_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}
640
/* KUnit suite definition wiring the cases and the setup/teardown hooks. */
static struct kunit_suite kmsan_test_suite = {
	.name = "kmsan",
	.test_cases = kmsan_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kmsan_suite_init,
	.suite_exit = kmsan_suite_exit,
};
kunit_test_suites(&kmsan_test_suite);
650
651 MODULE_LICENSE("GPL");
652 MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");