riscv: Split early and final KASAN population functions
arch/riscv/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right before the kernel mapping.
 *
 * For sv39, the region is aligned on PGDIR_SIZE, so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE, so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping, so it is populated
 *   at the lower pud/p4d levels
 *
 * In addition, when shallow populating a KASAN region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to
 * the pud level too.
 */
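
/*
 * For reference, generic KASAN maps every 8 bytes of kernel memory onto one
 * shadow byte:
 *
 *   shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * with KASAN_SHADOW_SCALE_SHIFT == 3, so the shadow region populated below
 * covers 1/8th of the address space. The BUILD_BUG_ON() in kasan_early_init()
 * checks that KASAN_SHADOW_OFFSET, KASAN_SHADOW_END and this scale agree.
 */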

extern pgd_t early_pg_dir[PTRS_PER_PGD];

static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        pte_t *ptep, *base_pte;

        if (pmd_none(*pmd))
                base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
        else
                base_pte = (pte_t *)pmd_page_vaddr(*pmd);

        ptep = base_pte + pte_index(vaddr);

        do {
                if (pte_none(*ptep)) {
                        phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
                }
        } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

        set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
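
/*
 * All of the kasan_populate_*() helpers below follow the same pattern as
 * kasan_populate_pte() above: reuse the existing page table if one is
 * already installed, otherwise allocate a fresh one from memblock, fill in
 * the missing entries, and only then publish the table in the upper-level
 * entry. Publishing last matters; see the comment in kasan_populate_pmd().
 */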

static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        pmd_t *pmdp, *base_pmd;
        unsigned long next;

        if (pud_none(*pud)) {
                base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
        } else {
                base_pmd = (pmd_t *)pud_pgtable(*pud);
                if (base_pmd == lm_alias(kasan_early_shadow_pmd))
                        base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
        }

        pmdp = base_pmd + pmd_index(vaddr);

        do {
                next = pmd_addr_end(vaddr, end);

                if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
                        phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
                        if (phys_addr) {
                                set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
                                continue;
                        }
                }

                kasan_populate_pte(pmdp, vaddr, next);
        } while (pmdp++, vaddr = next, vaddr != end);

        /*
         * Wait for the whole PMD table to be populated before installing it
         * in the PUD entry: otherwise, if we installed it before populating
         * it entirely, memblock could allocate a page at a physical address
         * whose shadow is not populated yet, and we would take a page fault.
         */
        set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
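
/*
 * Note the shortcut in the loop above: when a whole PMD-sized, PMD-aligned
 * chunk of shadow must be mapped, a single huge leaf entry is tried first,
 * which avoids allocating a PTE table for it. The same optimization repeats
 * at the PUD, P4D and PGDIR levels below, each time falling back to the
 * next-lower level when the contiguous allocation fails.
 */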

static void __init kasan_populate_pud(pgd_t *pgd,
                                      unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        pud_t *pudp, *base_pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
                memcpy(base_pud, (void *)kasan_early_shadow_pud,
                       sizeof(pud_t) * PTRS_PER_PUD);
        } else {
                base_pud = (pud_t *)pgd_page_vaddr(*pgd);
                if (base_pud == lm_alias(kasan_early_shadow_pud)) {
                        base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
                        memcpy(base_pud, (void *)kasan_early_shadow_pud,
                               sizeof(pud_t) * PTRS_PER_PUD);
                }
        }

        pudp = base_pud + pud_index(vaddr);

        do {
                next = pud_addr_end(vaddr, end);

                if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
                        phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
                        if (phys_addr) {
                                set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
                                continue;
                        }
                }

                kasan_populate_pmd(pudp, vaddr, next);
        } while (pudp++, vaddr = next, vaddr != end);

        /*
         * Wait for the whole PUD table to be populated before installing it
         * in the PGD entry: otherwise, if we installed it before populating
         * it entirely, memblock could allocate a page at a physical address
         * whose shadow is not populated yet, and we would take a page fault.
         */
        set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

static void __init kasan_populate_p4d(pgd_t *pgd,
                                      unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        p4d_t *p4dp, *base_p4d;
        unsigned long next;

        base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
        if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
                base_p4d = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
                memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
                       sizeof(p4d_t) * PTRS_PER_P4D);
        }

        p4dp = base_p4d + p4d_index(vaddr);

        do {
                next = p4d_addr_end(vaddr, end);

                if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
                        phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
                        if (phys_addr) {
                                set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
                                continue;
                        }
                }

                kasan_populate_pud((pgd_t *)p4dp, vaddr, next);
        } while (p4dp++, vaddr = next, vaddr != end);

        /*
         * Wait for the whole P4D table to be populated before installing it
         * in the PGD entry: otherwise, if we installed it before populating
         * it entirely, memblock could allocate a page at a physical address
         * whose shadow is not populated yet, and we would take a page fault.
         */
        set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
}

#define kasan_early_shadow_pgd_next                                     \
        (pgtable_l5_enabled ? (uintptr_t)kasan_early_shadow_p4d :       \
         (pgtable_l4_enabled ? (uintptr_t)kasan_early_shadow_pud :      \
                               (uintptr_t)kasan_early_shadow_pmd))

#define kasan_populate_pgd_next(pgdp, vaddr, next)                      \
        (pgtable_l5_enabled ?                                           \
                kasan_populate_p4d(pgdp, vaddr, next) :                 \
         (pgtable_l4_enabled ?                                          \
                kasan_populate_pud(pgdp, vaddr, next) :                 \
                kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
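
/*
 * With sv39 (neither pgtable_l4_enabled nor pgtable_l5_enabled), both macros
 * collapse to the PMD case because the p4d and pud levels are folded: a PGD
 * entry then directly covers what kasan_populate_pmd() expects, hence the
 * (pud_t *) cast of pgdp above. On sv48 they resolve to the pud variants,
 * and on sv57 to the p4d variants.
 */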

static void __init kasan_populate_pgd(pgd_t *pgdp,
                                      unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        unsigned long next;

        do {
                next = pgd_addr_end(vaddr, end);

                if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
                        if (pgd_page_vaddr(*pgdp) ==
                            (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
                                /*
                                 * This pgd entry can't be none since
                                 * kasan_early_init() initialized the whole
                                 * KASAN shadow region with
                                 * kasan_early_shadow_pgd_next: if this is
                                 * still the case, we can try to allocate a
                                 * hugepage as a replacement.
                                 */
                                phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
                                if (phys_addr) {
                                        set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
                                        continue;
                                }
                        }
                }

                kasan_populate_pgd_next(pgdp, vaddr, next);
        } while (pgdp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_pud(p4d_t *p4dp,
                                            unsigned long vaddr,
                                            unsigned long end)
{
        pud_t *pudp, *base_pud;
        phys_addr_t phys_addr;
        unsigned long next;

        if (!pgtable_l4_enabled) {
                pudp = (pud_t *)p4dp;
        } else {
                base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
                pudp = base_pud + pud_index(vaddr);
        }

        do {
                next = pud_addr_end(vaddr, end);

                if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
                    (next - vaddr) >= PUD_SIZE) {
                        phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
                        set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
                        continue;
                }

                BUG();
        } while (pudp++, vaddr = next, vaddr != end);
}
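
/*
 * The BUG() above is intentional: at this early stage the shadow region is
 * expected to be PUD-aligned at this level, so every iteration must be able
 * to install the premade kasan_early_shadow_pmd table. Reaching BUG() would
 * mean the KASAN shadow region layout assumptions no longer hold.
 */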

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
                                            unsigned long vaddr,
                                            unsigned long end)
{
        p4d_t *p4dp, *base_p4d;
        phys_addr_t phys_addr;
        unsigned long next;

        /*
         * We can't use pgd_page_vaddr() here as it would return a linear
         * mapping address, but the linear mapping is not mapped yet. When
         * populating early_pg_dir we need the physical address, and when
         * populating swapper_pg_dir we need the kernel virtual address, so
         * use the pt_ops facility, which handles both cases.
         * Note that this is then completely equivalent to:
         * p4dp = p4d_offset(pgdp, vaddr)
         */
        if (!pgtable_l5_enabled) {
                p4dp = (p4d_t *)pgdp;
        } else {
                base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
                p4dp = base_p4d + p4d_index(vaddr);
        }

        do {
                next = p4d_addr_end(vaddr, end);

                if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
                    (next - vaddr) >= P4D_SIZE) {
                        phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
                        set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
                        continue;
                }

                kasan_early_populate_pud(p4dp, vaddr, next);
        } while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_pgd(pgd_t *pgdp,
                                            unsigned long vaddr,
                                            unsigned long end)
{
        phys_addr_t phys_addr;
        unsigned long next;

        do {
                next = pgd_addr_end(vaddr, end);

                if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
                    (next - vaddr) >= PGDIR_SIZE) {
                        phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
                        set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
                        continue;
                }

                kasan_early_populate_p4d(pgdp, vaddr, next);
        } while (pgdp++, vaddr = next, vaddr != end);
}
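
/*
 * To summarize the early path: kasan_early_populate_pgd() and its helpers
 * never allocate memory. They only wire the whole shadow region to the
 * statically allocated kasan_early_shadow_{p4d,pud,pmd,pte} tables, which
 * all ultimately point at kasan_early_shadow_page, so every shadow access
 * made before kasan_init() runs reads zeroes ("no poison").
 */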

asmlinkage void __init kasan_early_init(void)
{
        uintptr_t i;

        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
                     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
                        pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
                        pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
                                PAGE_TABLE));

        if (pgtable_l4_enabled) {
                for (i = 0; i < PTRS_PER_PUD; ++i)
                        set_pud(kasan_early_shadow_pud + i,
                                pfn_pud(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
                                        PAGE_TABLE));
        }

        if (pgtable_l5_enabled) {
                for (i = 0; i < PTRS_PER_P4D; ++i)
                        set_p4d(kasan_early_shadow_p4d + i,
                                pfn_p4d(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pud)),
                                        PAGE_TABLE));
        }

        kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
                                 KASAN_SHADOW_START, KASAN_SHADOW_END);

        local_flush_tlb_all();
}
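
/*
 * kasan_early_init() is called very early in the boot flow, before the final
 * page tables are installed, which is why it populates early_pg_dir.
 * kasan_swapper_init() below repeats the same early population for
 * swapper_pg_dir.
 */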

void __init kasan_swapper_init(void)
{
        kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
                                 KASAN_SHADOW_START, KASAN_SHADOW_END);

        local_flush_tlb_all();
}

static void __init kasan_populate(void *start, void *end)
{
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);

        kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);

        local_flush_tlb_all();
        memset(start, KASAN_SHADOW_INIT, end - start);
}
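
/*
 * Once the shadow for a region is backed by real memory, it is initialized
 * to KASAN_SHADOW_INIT (0 for generic KASAN), which marks the corresponding
 * memory as fully accessible until something poisons it.
 */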

static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
                                              unsigned long vaddr, unsigned long end)
{
        unsigned long next;
        pmd_t *pmdp, *base_pmd;
        bool is_kasan_pte;

        base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
        pmdp = base_pmd + pmd_index(vaddr);

        do {
                next = pmd_addr_end(vaddr, end);
                is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

                if (is_kasan_pte)
                        pmd_clear(pmdp);
        } while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
                                              unsigned long vaddr, unsigned long end)
{
        unsigned long next;
        pud_t *pudp, *base_pud;
        pmd_t *base_pmd;
        bool is_kasan_pmd;

        base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
        pudp = base_pud + pud_index(vaddr);

        do {
                next = pud_addr_end(vaddr, end);
                is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

                if (!is_kasan_pmd)
                        continue;

                base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

                if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
                        continue;

                memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
                kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
        } while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
                                              unsigned long vaddr, unsigned long end)
{
        unsigned long next;
        p4d_t *p4dp, *base_p4d;
        pud_t *base_pud;
        bool is_kasan_pud;

        base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
        p4dp = base_p4d + p4d_index(vaddr);

        do {
                next = p4d_addr_end(vaddr, end);
                is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

                if (!is_kasan_pud)
                        continue;

                base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

                if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
                        continue;

                memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
                kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
        } while (p4dp++, vaddr = next, vaddr != end);
}

#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)              \
        (pgtable_l5_enabled ?                                           \
                kasan_shallow_populate_p4d(pgdp, vaddr, next) :         \
         (pgtable_l4_enabled ?                                          \
                kasan_shallow_populate_pud(pgdp, vaddr, next) :         \
                kasan_shallow_populate_pmd(pgdp, vaddr, next)))

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
        unsigned long next;
        void *p;
        pgd_t *pgd_k = pgd_offset_k(vaddr);
        bool is_kasan_pgd_next;

        do {
                next = pgd_addr_end(vaddr, end);
                is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
                                     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

                if (is_kasan_pgd_next) {
                        p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
                }

                if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
                        continue;

                /* Only copy into a table we just allocated: p is not valid otherwise. */
                if (is_kasan_pgd_next)
                        memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
                kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
        } while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);

        kasan_shallow_populate_pgd(vaddr, vend);
        local_flush_tlb_all();
}
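
/*
 * "Shallow" population only builds the upper levels of the shadow page
 * tables and deliberately leaves the lower levels empty: for vmalloc, the
 * actual shadow pages are allocated on demand, when a vmalloc'ed region is
 * mapped, rather than up front for the whole vmalloc space.
 */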

void __init kasan_init(void)
{
        phys_addr_t p_start, p_end;
        u64 i;

        if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
                kasan_shallow_populate(
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

        /* Populate the linear mapping */
        for_each_mem_range(i, &p_start, &p_end) {
                void *start = (void *)__va(p_start);
                void *end = (void *)__va(p_end);

                if (start >= end)
                        break;

                kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        }

        /* Populate kernel, BPF, modules mapping */
        kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
                       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        mk_pte(virt_to_page(kasan_early_shadow_page),
                               __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));

        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
        init_task.kasan_depth = 0;
}
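
/*
 * The final loop above remaps kasan_early_shadow_page read-only: any region
 * still backed by the early shadow (e.g. unpopulated vmalloc shadow) keeps
 * reading zeroes, but can no longer be written by mistake. Clearing
 * init_task.kasan_depth is what actually turns KASAN reporting on.
 */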