Merge tag 'drm-intel-gt-next-2023-03-16' of git://anongit.freedesktop.org/drm/drm...
[platform/kernel/linux-rpi.git] / drivers / misc / lkdtm / heap.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This is for all the tests relating directly to heap memory, including
4  * page allocation and slab allocations.
5  */
6 #include "lkdtm.h"
7 #include <linux/slab.h>
8 #include <linux/vmalloc.h>
9 #include <linux/sched.h>
10
/* Slab caches used by the double-free and cross-cache-free tests below;
 * created in lkdtm_heap_init() and destroyed in lkdtm_heap_exit().
 */
static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;
14
/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
21
/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	/* Two back-to-back one-page vmalloc allocations. */
	one = vzalloc(PAGE_SIZE);
	/* Hide the pointer so the compiler cannot prove the buffer size. */
	OPTIMIZER_HIDE_VAR(one);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	/* Deliberately write __offset (1) byte(s) past the end of "one". */
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}
43
/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASan, or by using
 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	/* 1020-byte request lands in the 1024-byte kmalloc cache. */
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	/* Hide the pointer so the compiler cannot prove the buffer size. */
	OPTIMIZER_HIDE_VAR(data);
	/* Index 256 writes at byte offset 1024: 4 bytes past the request. */
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}
64
/* Deliberately write into a slab allocation after it has been freed. */
static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	/* The use-after-free: free first, then write through the stale pointer. */
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
90
/*
 * Deliberately read from a freed slab allocation and report whether
 * free-poisoning (e.g. init_on_free) scrubbed the stale value.
 */
static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use the either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 *
	 * NOTE(review): offset is in int-sized elements, so sizeof(*base)
	 * actually indexes the fifth int (byte offset 16), not the second
	 * word as stated above — still clear of the freelist pointer,
	 * but confirm the comment/offset mismatch is intentional.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	/* Stash a known value, then free the allocation holding it. */
	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	/* The use-after-free read: stale pointer dereference. */
	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
134
/* Deliberately write to a buddy (page allocator) page after freeing it. */
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	/* Yield to give the allocator a chance to recycle the page. */
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	/* The use-after-free: write through the stale page address. */
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
154
/*
 * Deliberately read from a freed buddy (page allocator) page and report
 * whether free-poisoning scrubbed the stale value.
 */
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	/* Separate slab allocation keeps the reference value off the test page. */
	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	/* Stash a known value in the page, then free the page. */
	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	/* The use-after-free read: stale page dereference. */
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
191
/*
 * Fill a slab allocation with a pattern, free it, reallocate the same
 * size, and check whether the pattern survived — i.e. whether
 * allocation-time zeroing (init_on_alloc) is in effect.
 */
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	/* Clobber the allocation with a recognizable pattern, then free it. */
	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	/* The check only proves anything if we got the same object back. */
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}
223
224 static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
225 {
226         u8 *first;
227         u8 *val;
228
229         first = (u8 *)__get_free_page(GFP_KERNEL);
230         if (!first) {
231                 pr_info("Unable to allocate first free page\n");
232                 return;
233         }
234
235         memset(first, 0xAB, PAGE_SIZE);
236         free_page((unsigned long)first);
237
238         val = (u8 *)__get_free_page(GFP_KERNEL);
239         if (!val) {
240                 pr_info("Unable to allocate second free page\n");
241                 return;
242         }
243
244         if (val != first) {
245                 pr_warn("Reallocation missed clobbered memory.\n");
246         }
247
248         if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
249                 pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
250         } else {
251                 pr_err("FAIL: Slab was not initialized\n");
252                 pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
253         }
254         free_page((unsigned long)val);
255 }
256
/* Deliberately free the same slab object twice. */
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	/* The second free of the same object is the fault being injected. */
	kmem_cache_free(double_free_cache, val);
}
273
/* Deliberately free an object into a cache it was not allocated from. */
static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	/* Allocated from a_cache, freed to b_cache: the injected fault. */
	kmem_cache_free(b_cache, val);
}
289
/* Deliberately pass a buddy page, not a slab object, to kmem_cache_free(). */
static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}
298
/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
/* Intentionally empty: existence alone prevents cache merging. */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }
309
/* Create the dedicated caches used by the slab-free tests above. */
void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}
317
/* Tear down the caches created in lkdtm_heap_init(). */
void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}
324
/* Registry of every heap crash test exported by this file. */
static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};
338
/* Category descriptor consumed by the LKDTM core. */
struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};