// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region start is aligned on PGDIR_SIZE whereas the
 * end of the region is not, so we have to go down to the PUD level.
 */

extern pgd_t early_pg_dir[PTRS_PER_PGD];
pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
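
/*
 * Populate the PTE level of the shadow mapping for [vaddr, end): allocate the
 * PTE table from memblock if the PMD entry is empty, then back every
 * still-empty PTE with a freshly allocated shadow page initialized to
 * KASAN_SHADOW_INIT.
 */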
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *p;

	if (pmd_none(*pmd)) {
		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}
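
/*
 * Same pattern one level up: when vaddr is PMD-aligned and a PMD_SIZE
 * allocation succeeds, map a PMD-sized shadow block directly; otherwise fall
 * back to populating individual PTEs.
 */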
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(*pud)) {
		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
}
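
/*
 * The same block-or-recurse pattern repeats at the PUD, P4D and PGD levels
 * below, each one trying the largest mapping size first.
 */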
static void __init kasan_populate_pud(p4d_t *p4d,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *p;
	unsigned long next;

	if (p4d_none(*p4d)) {
		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *p;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
		}

		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

		kasan_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
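
/*
 * The kasan_early_clear_* helpers unhook the early shadow mappings (which all
 * alias the read-only kasan_early_shadow_page) so that kasan_init() can
 * repopulate the region with real, writable shadow memory.
 */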
static void __init kasan_early_clear_pud(p4d_t *p4dp,
					 unsigned long vaddr, unsigned long end)
{
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			pud_clear(pudp);
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_p4d(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			p4d_clear(p4dp);
			continue;
		}

		kasan_early_clear_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_pgd(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			pgd_clear(pgdp);
			continue;
		}

		kasan_early_clear_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
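
/*
 * The kasan_early_populate_* helpers map the whole shadow region through the
 * statically allocated early shadow page tables, so every shadow access hits
 * the same zero-initialized kasan_early_shadow_page.
 */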
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here as it would return a linear
	 * mapping address, but the linear mapping is not mapped yet. When
	 * populating early_pg_dir we need the physical address, and when
	 * populating swapper_pg_dir we need the kernel virtual address, so
	 * use the pt_ops facility for both.
	 * Note that this is then completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr).
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
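
/*
 * Called early in boot, before real shadow memory can be allocated: point
 * every level of the early shadow tables at kasan_early_shadow_page and hook
 * the shadow region into early_pg_dir so instrumented code can run.
 */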
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pud)),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}
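
/* Redo the early shadow mappings in swapper_pg_dir once it is live. */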
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}
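
/* Populate real shadow memory for [start, end), rounded out to page size. */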
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}
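
/*
 * Shallow population only allocates the intermediate page tables for the
 * shadow of the vmalloc region; the shadow pages themselves are populated on
 * demand when CONFIG_KASAN_VMALLOC is enabled.
 */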
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pud_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pud_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4d_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		/* Recurse only within this p4d entry's range, as the pgd level does. */
		kasan_shallow_populate_pud(p4d_k, vaddr, next);
	} while (p4d_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgd_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
}

static void __init create_tmp_mapping(void)
{
	void *ptr;
	p4d_t *base_p4d;

	/*
	 * We need to clean the early mapping: this is hard to achieve "in-place",
	 * so install a temporary mapping like arm64 and x86 do.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);

	/* Copy the last p4d since it is shared with the kernel mapping. */
	if (pgtable_l5_enabled) {
		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
		base_p4d = tmp_p4d;
	} else {
		base_p4d = (p4d_t *)tmp_pg_dir;
	}

	/* Copy the last pud since it is shared with the kernel mapping. */
	if (pgtable_l4_enabled) {
		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
	}
}
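
/*
 * Main KASAN setup: run on the temporary page table while the early shadow is
 * torn down, populate real shadow for the linear mapping and the kernel, then
 * switch back to swapper_pg_dir with the shadow fully initialized.
 */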
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	create_tmp_mapping();
	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);

	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
			      KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
		/* Shallow populate modules and BPF which are vmalloc-allocated */
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)kasan_mem_to_shadow((void *)MODULES_END));
	} else {
		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate the kernel mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/* Remap the early shadow page read-only and re-zero it. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;

	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();
}