// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping so it is populated
 *   at the lower levels pud/p4d
 *
 * In addition, when shallow populating a kasan region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to
 * the pud level too.
 */

extern pgd_t early_pg_dir[PTRS_PER_PGD];
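
/*
 * Populate the final shadow PTEs for [vaddr, end): reuse the pte table
 * already plugged into the pmd if there is one, otherwise allocate a new
 * one, then back every empty slot with a freshly allocated shadow page.
 */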
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
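
/*
 * Populate the shadow at pmd level: map PMD-aligned, fully covered ranges
 * with PMD-sized hugepages when memblock can provide them, and fall back
 * to kasan_populate_pte() otherwise.
 */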
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD to be populated before setting the PUD in
	 * the page table, otherwise, if we did set the PUD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
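
/*
 * Same logic one level up: try PUD-sized hugepages first, then recurse
 * into kasan_populate_pmd() for the ranges that cannot use them.
 */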
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		memcpy(base_pud, (void *)kasan_early_shadow_pud,
		       sizeof(pud_t) * PTRS_PER_PUD);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}
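
/*
 * Same logic at p4d level, only reached with sv57: try P4D-sized
 * hugepages first, then recurse into kasan_populate_pud().
 */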
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
	if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
		base_p4d = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
		       sizeof(p4d_t) * PTRS_PER_P4D);
	}

	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole P4D to be populated before setting the P4D in
	 * the page table, otherwise, if we did set the P4D before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
}
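
/*
 * The next level under a pgd depends on the paging mode chosen at boot:
 * p4d for sv57, pud for sv48 and pmd for sv39. These helpers pick the
 * matching early shadow table and population routine at runtime.
 */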
#define kasan_early_shadow_pgd_next	(pgtable_l5_enabled ?		\
				(uintptr_t)kasan_early_shadow_p4d :	\
				(pgtable_l4_enabled ?			\
				(uintptr_t)kasan_early_shadow_pud :	\
				(uintptr_t)kasan_early_shadow_pmd))
#define kasan_populate_pgd_next(pgdp, vaddr, next)			\
		(pgtable_l5_enabled ?					\
		kasan_populate_p4d(pgdp, vaddr, next) :			\
		(pgtable_l4_enabled ?					\
		kasan_populate_pud(pgdp, vaddr, next) :			\
		kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
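
/*
 * Top-level walk used by kasan_populate(): try to replace each fully
 * covered, PGDIR-aligned entry that still points at the early shadow
 * with a PGDIR_SIZE hugepage, and descend otherwise.
 */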
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (pgd_page_vaddr(*pgdp) ==
			    (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with the early shadow tables: if this is
				 * still the case, we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
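
/*
 * The kasan_early_populate_* helpers run before the final page tables
 * exist: they only wire each level to the statically allocated early
 * shadow tables, so no memory is allocated here.
 */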
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here as it would return a linear
	 * mapping address but it is not mapped yet: when populating
	 * early_pg_dir, we need the physical address, and when populating
	 * swapper_pg_dir, we need the kernel virtual address, so use the
	 * pt_ops facility instead.
	 * Note that this is then completely equivalent to:
	 *	p4dp = p4d_offset(pgdp, vaddr)
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}
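
/*
 * Wire the whole shadow region, at pgd level, to the early shadow
 * tables. Entries that are not PGDIR-aligned or not fully covered are
 * handled one level down.
 */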
static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
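
/*
 * Build the initial shadow mapping: every level of the early shadow
 * tables funnels down to the single kasan_early_shadow_page, so any
 * shadow access during early boot hits one reusable page.
 */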
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pud)),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}
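
/*
 * Re-create the early shadow mapping in swapper_pg_dir before it is
 * switched to, so KASAN keeps working across the page-table switch.
 */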
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}
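
/*
 * Shallow population, used for vmalloc with CONFIG_KASAN_VMALLOC: only
 * the intermediate levels of the shadow are allocated, the leaf entries
 * keep pointing at the early shadow, and real shadow pages are then
 * provided on demand by the core vmalloc KASAN code.
 */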
static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmdp, *base_pmd;
	bool is_kasan_pte;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

		if (is_kasan_pte)
			pmd_clear(pmdp);
	} while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (!is_kasan_pmd)
			continue;

		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
			continue;

		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	p4d_t *p4dp, *base_p4d;
	pud_t *base_pud;
	bool is_kasan_pud;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

		if (!is_kasan_pud)
			continue;

		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
			continue;

		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)		\
		(pgtable_l5_enabled ?					\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :		\
		(pgtable_l4_enabled ?					\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :		\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))
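
/*
 * Walk the pgd level: any entry still pointing at the early shadow gets
 * its own page, whose content is copied from the early shadow when the
 * entry is only partially covered by [vaddr, end).
 */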
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}
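
/*
 * Final setup: populate a real shadow for the linear mapping and for the
 * kernel/modules/BPF region, remap the early shadow page read-only and
 * clear it so everything still backed by it reads as unpoisoned, then
 * enable reporting by resetting init_task.kasan_depth.
 */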
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}