// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

extern pgd_t early_pg_dir[PTRS_PER_PGD];
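/*
 * Map the whole KASAN shadow region to the single zeroed
 * kasan_early_shadow_page through the shared early shadow page tables,
 * so that shadow accesses made before kasan_init() allocates the real
 * shadow memory hit a valid mapping. Both early_pg_dir and
 * swapper_pg_dir receive the same early entries.
 */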
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
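
	/*
	 * Generic KASAN translates an address to its shadow as
	 * shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
	 * so this check pins KASAN_SHADOW_OFFSET to the value that makes
	 * the top of the address space map exactly to KASAN_SHADOW_END.
	 */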
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t) kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}
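
/*
 * Populate the shadow PTEs covering [vaddr, end): allocate a backing
 * page from memblock for each still-empty entry, then hook the PTE
 * table into the PMD.
 */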
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
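
/*
 * Populate the shadow PMDs covering [vaddr, end): use a PMD-level huge
 * mapping when the range is suitably aligned and memblock can supply a
 * PMD-sized block, otherwise fall back to page-granular PTEs.
 */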
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
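
/*
 * Populate the shadow PGD entries covering [vaddr, end): try a
 * PGDIR-sized huge mapping first where the entry still points at the
 * early shadow and the range is suitably aligned, otherwise descend
 * into kasan_populate_pmd().
 */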
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * pgdp can't be none since kasan_early_init initialized all
		 * KASAN shadow region with kasan_early_shadow_pmd: if this is
		 * still the case, that means we can try to allocate a
		 * hugepage as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
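
/*
 * Populate real shadow memory for [start, end) and clear it to
 * KASAN_SHADOW_INIT so the covered region starts out unpoisoned.
 */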
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}
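
/*
 * Replace the early shadow PGD entries covering [vaddr, end) with
 * freshly allocated, empty page tables; the lower levels are expected
 * to be populated on demand by the vmalloc shadow handling.
 */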
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);
		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
	} while (pgd_k++, vaddr = next, vaddr != end);
}
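
/*
 * Shallow-populate the shadow of [start, end): only the top-level
 * entries are wired up here.
 */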
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}
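
/*
 * Switch from the early shadow to the real shadow: cover the vmalloc
 * area (shallowly, when CONFIG_KASAN_VMALLOC is set), the linear
 * mapping of each usable memory range, and the kernel/BPF/modules
 * mapping.
 */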
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
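
	/*
	 * All shadow memory is now populated, so the early shadow page can
	 * be remapped read-only.
	 */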
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}