// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

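/*
 * A quick recap of the generic shadow encoding these checks rely on:
 * each KASAN_GRANULE_SIZE-byte (8-byte) granule of memory maps to one
 * shadow byte. A shadow value of 0 means the whole granule is
 * accessible, a value N in [1, KASAN_GRANULE_SIZE - 1] means only the
 * first N bytes are, and a negative value marks the granule as
 * poisoned. E.g. with a shadow value of 5, a 1-byte access at offset 4
 * within the granule is valid (4 < 5), while one at offset 5 is
 * reported (5 >= 5).
 */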
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

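/*
 * Worked example for the boundary check below: an 8-byte access at an
 * address with offset 4 within its granule ends at offset 3 of the
 * next granule (3 < size - 1 == 7), so the shadow bytes of both
 * granules must be checked.
 */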
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8-byte (granule size) boundary. Such an
	 * access maps into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

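/*
 * Scan the shadow region [start, end) for a nonzero byte: unaligned
 * head bytes are checked one by one, then whole 8-byte words, then the
 * tail. Returns the address of the first nonzero byte found, or 0 if
 * the region is all-zero (i.e. fully accessible).
 */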
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

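/*
 * Check an access of arbitrary size. A nonzero shadow byte is only
 * tolerated if it is the very last shadow byte of the range and it
 * encodes a partially accessible granule that still covers the last
 * accessed byte.
 */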
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

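/*
 * The core check: zero-size accesses are trivially valid; a range that
 * wraps around the address space or starts below the shadow-mapped
 * part of the kernel address space is reported as invalid; otherwise
 * the shadow is consulted. Returns true if the access is valid.
 */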
static __always_inline bool check_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

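/*
 * Called for each global variable via constructors that the compiler
 * emits: unpoison the global's storage itself and poison the trailing
 * redzone that the compiler placed after it.
 */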
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

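/*
 * For each instrumented memory access the compiler emits a call to one
 * of the checks defined below, roughly:
 *
 *	__asan_load4((unsigned long)ptr);
 *	val = *ptr;
 *
 * The _noabort aliases are provided because compilers may emit those
 * symbols instead when instrumenting the kernel.
 */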
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by the compiler to poison alloca()ed objects. */
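/*
 * The resulting layout around the object (shadow view):
 *
 *	[ left redzone ][ object, unpoisoned ][ padding + right redzone ]
 *	 KASAN_ALLOCA_LEFT                      KASAN_ALLOCA_RIGHT
 *
 * with each redzone KASAN_ALLOCA_REDZONE_SIZE bytes wide.
 */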
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by the compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison(stack_top, stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

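/*
 * The byte values mirror the compiler's stack shadow markers:
 * f1 - left redzone, f2 - mid redzone, f3 - right redzone,
 * f5 - after return, f8 - out of scope.
 */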
DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

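/*
 * Record an auxiliary call stack (e.g. the call_rcu() or queue_work()
 * call site) in the object's alloc metadata; the two most recent aux
 * stacks are kept, with the older one shifted into aux_stack[1].
 */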
static void __kasan_record_aux_stack(void *addr, bool can_alloc)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT, can_alloc);
}

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, true);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, false);
}

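/*
 * Save the free stack and mark the object's first shadow byte as
 * KASAN_SLAB_FREETRACK, which is how kasan_get_free_track() below
 * knows the free metadata is valid.
 */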
void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
	/* The object was freed and has free track set. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag)
{
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREETRACK)
		return NULL;
	/* Free meta must be present with KASAN_SLAB_FREETRACK. */
	return &kasan_get_free_meta(cache, object)->free_track;
}