kfence: add test suite
[platform/kernel/linux-starfive.git] / mm / kfence / report.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KFENCE reporting.
4  *
5  * Copyright (C) 2020, Google LLC.
6  */
7
8 #include <stdarg.h>
9
10 #include <linux/kernel.h>
11 #include <linux/lockdep.h>
12 #include <linux/printk.h>
13 #include <linux/sched/debug.h>
14 #include <linux/seq_file.h>
15 #include <linux/stacktrace.h>
16 #include <linux/string.h>
17
18 #include <asm/kfence.h>
19
20 #include "kfence.h"
21
22 /* Helper function to either print to a seq_file or to console. */
23 __printf(2, 3)
24 static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
25 {
26         va_list args;
27
28         va_start(args, fmt);
29         if (seq)
30                 seq_vprintf(seq, fmt, args);
31         else
32                 vprintk(fmt, args);
33         va_end(args);
34 }
35
36 /*
37  * Get the number of stack entries to skip to get out of MM internals. @type is
38  * optional, and if set to NULL, assumes an allocation or free stack.
39  */
40 static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
41                             const enum kfence_error_type *type)
42 {
43         char buf[64];
44         int skipnr, fallback = 0;
45
46         if (type) {
47                 /* Depending on error type, find different stack entries. */
48                 switch (*type) {
49                 case KFENCE_ERROR_UAF:
50                 case KFENCE_ERROR_OOB:
51                 case KFENCE_ERROR_INVALID:
52                         /*
53                          * kfence_handle_page_fault() may be called with pt_regs
54                          * set to NULL; in that case we'll simply show the full
55                          * stack trace.
56                          */
57                         return 0;
58                 case KFENCE_ERROR_CORRUPTION:
59                 case KFENCE_ERROR_INVALID_FREE:
60                         break;
61                 }
62         }
63
64         for (skipnr = 0; skipnr < num_entries; skipnr++) {
65                 int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);
66
67                 if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") ||
68                     !strncmp(buf, "__slab_free", len)) {
69                         /*
70                          * In case of tail calls from any of the below
71                          * to any of the above.
72                          */
73                         fallback = skipnr + 1;
74                 }
75
76                 /* Also the *_bulk() variants by only checking prefixes. */
77                 if (str_has_prefix(buf, "kfree") ||
78                     str_has_prefix(buf, "kmem_cache_free") ||
79                     str_has_prefix(buf, "__kmalloc") ||
80                     str_has_prefix(buf, "kmem_cache_alloc"))
81                         goto found;
82         }
83         if (fallback < num_entries)
84                 return fallback;
85 found:
86         skipnr++;
87         return skipnr < num_entries ? skipnr : 0;
88 }
89
90 static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
91                                bool show_alloc)
92 {
93         const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
94
95         if (track->num_stack_entries) {
96                 /* Skip allocation/free internals stack. */
97                 int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
98
99                 /* stack_trace_seq_print() does not exist; open code our own. */
100                 for (; i < track->num_stack_entries; i++)
101                         seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
102         } else {
103                 seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
104         }
105 }
106
107 void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
108 {
109         const int size = abs(meta->size);
110         const unsigned long start = meta->addr;
111         const struct kmem_cache *const cache = meta->cache;
112
113         lockdep_assert_held(&meta->lock);
114
115         if (meta->state == KFENCE_OBJECT_UNUSED) {
116                 seq_con_printf(seq, "kfence-#%zd unused\n", meta - kfence_metadata);
117                 return;
118         }
119
120         seq_con_printf(seq,
121                        "kfence-#%zd [0x" PTR_FMT "-0x" PTR_FMT
122                        ", size=%d, cache=%s] allocated by task %d:\n",
123                        meta - kfence_metadata, (void *)start, (void *)(start + size - 1), size,
124                        (cache && cache->name) ? cache->name : "<destroyed>", meta->alloc_track.pid);
125         kfence_print_stack(seq, meta, true);
126
127         if (meta->state == KFENCE_OBJECT_FREED) {
128                 seq_con_printf(seq, "\nfreed by task %d:\n", meta->free_track.pid);
129                 kfence_print_stack(seq, meta, false);
130         }
131 }
132
133 /*
134  * Show bytes at @addr that are different from the expected canary values, up to
135  * @max_bytes.
136  */
137 static void print_diff_canary(unsigned long address, size_t bytes_to_show,
138                               const struct kfence_metadata *meta)
139 {
140         const unsigned long show_until_addr = address + bytes_to_show;
141         const u8 *cur, *end;
142
143         /* Do not show contents of object nor read into following guard page. */
144         end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
145                                                 : min(show_until_addr, PAGE_ALIGN(address)));
146
147         pr_cont("[");
148         for (cur = (const u8 *)address; cur < end; cur++) {
149                 if (*cur == KFENCE_CANARY_PATTERN(cur))
150                         pr_cont(" .");
151                 else if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
152                         pr_cont(" 0x%02x", *cur);
153                 else /* Do not leak kernel memory in non-debug builds. */
154                         pr_cont(" !");
155         }
156         pr_cont(" ]");
157 }
158
/* Human-readable name for the access direction. */
static const char *get_access_type(bool is_write)
{
	if (is_write)
		return "write";
	return "read";
}
163
/*
 * Print a KFENCE error report for @address to the console.
 *
 * @address: address involved in the error
 * @is_write: whether the access was a write (unused for corruption and
 *            invalid-free reports)
 * @regs:     register state at the fault, or NULL to capture the current
 *            stack trace instead
 * @meta:     metadata of the object involved; may be NULL only for
 *            KFENCE_ERROR_INVALID
 * @type:     class of error being reported
 *
 * If @meta is non-NULL, the caller must hold meta->lock.
 */
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	/* Index of the object in the metadata pool, or -1 if there is none. */
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;

	if (regs) {
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		/* No fault registers: capture our own stack and skip MM internals. */
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and just get our message out (given the
	 * system might already behave unpredictably due to the memory error).
	 * As such, also disable lockdep to hide warnings, and avoid disabling
	 * lockdep for the rest of the kernel.
	 */
	lockdep_off();

	pr_err("==================================================================\n");
	/* Print report header. */
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x" PTR_FMT " (%luB %s of kfence-#%zd):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x" PTR_FMT " (in kfence-#%zd):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x" PTR_FMT " ", (void *)address);
		/* Show which canary bytes around the object were overwritten. */
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%zd):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x" PTR_FMT ":\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x" PTR_FMT " (in kfence-#%zd):\n", (void *)address,
		       object_index);
		break;
	}

	/* Print stack trace and object info. */
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

	/* Print report footer. */
	pr_err("\n");
	if (IS_ENABLED(CONFIG_DEBUG_KERNEL) && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	pr_err("==================================================================\n");

	lockdep_on();

	/* Honor panic_on_warn: a KFENCE report is a warning-level event. */
	if (panic_on_warn)
		panic("panic_on_warn set ...\n");

	/* We encountered a memory unsafety error, taint the kernel! */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}