// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

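/*
 * The shadow mapping is built in two passes: an "early" pass that points
 * every entry at the statically allocated kasan_early_shadow_* tables, and
 * a final pass that replaces those with freshly allocated, zeroed pages.
 * For each level, an entry counts as "none" (i.e. still needs populating)
 * if it is empty in the early pass, or if it still points at the shared
 * early shadow table one level down in the final pass.
 */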
#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

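/* True until kasan_init() starts installing the real shadow mappings */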
bool kasan_early_stage = true;

/*
 * Allocate a zeroed page to back the shadow memory page tables.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                                        __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                        __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

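/*
 * Return the pte for @addr, first installing a pte table if the pmd entry
 * still counts as "none" for this pass.  A final-pass table starts out as
 * a copy of the early shadow pte table, so existing mappings carry over.
 */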
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
        if (__pmd_none(early, READ_ONCE(*pmdp))) {
                phys_addr_t pte_phys = early ?
                                __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
                pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
        }

        return pte_offset_kernel(pmdp, addr);
}

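/* As kasan_pte_offset(), one level up: ensure a pmd table under *pudp */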
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
        if (__pud_none(early, READ_ONCE(*pudp))) {
                phys_addr_t pmd_phys = early ?
                                __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
                pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
        }

        return pmd_offset(pudp, addr);
}

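/* As kasan_pte_offset(), two levels up: ensure a pud table under *p4dp */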
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
        if (__p4d_none(early, READ_ONCE(*p4dp))) {
                phys_addr_t pud_phys = early ?
                        __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
                p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
        }

        return pud_offset(p4dp, addr);
}

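/*
 * Map each page of [addr, end) in the shadow region: to the shared zero
 * page in the early pass, or to a newly allocated zeroed page in the
 * final pass.  The walk stops once it hits an already-populated pte.
 */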
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

        do {
                phys_addr_t page_phys = early ?
                                        __pa_symbol(kasan_early_shadow_page) :
                                        kasan_alloc_zeroed_page(node);
                next = addr + PAGE_SIZE;
                set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
}

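/* Walk the pmd entries covering [addr, end) and fill in their pte levels */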
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, node, early);
        } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
}

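/* Walk the pud entries covering [addr, end) and fill in their pmd levels */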
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pudp, addr, next, node, early);
        } while (pudp++, addr = next, addr != end);
}

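/* Walk the p4d entries covering [addr, end) and fill in their pud levels */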
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = p4d_offset(pgdp, addr);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end);
}

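/* Top-level walk: populate the whole shadow range [addr, end) */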
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);

        do {
                next = pgd_addr_end(addr, end);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

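/*
 * The shadow region is cleared and repopulated a whole pgd entry at a
 * time (see clear_pgds()), so its bounds must be PGDIR_SIZE-aligned;
 * check that at build time.
 */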
asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
}

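/* Update a pgd entry with a single WRITE_ONCE() store */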
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
        WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
        /*
         * Remove references to the kasan page tables from
         * swapper_pg_dir.  pgd_clear() can't be used here
         * because it is a no-op on 2- and 3-level pagetable setups.
         */
        for (; start < end; start += PGDIR_SIZE)
                kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
        u64 i;
        phys_addr_t pa_start, pa_end;

        /*
         * The pgd entries were populated with invalid_pmd_table or
         * invalid_pud_table in pagetable_init(), depending on how many
         * page table levels are in use.  The entries covering the kasan
         * shadow region must be cleared here: since their values are
         * non-zero, pgd_none() would otherwise be false and the populate
         * pass below would not install any new pgd entries at all.
         */
        memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
        csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
        local_flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        /* Maps everything to a single page of zeroes */
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
                                        kasan_mem_to_shadow((void *)KFENCE_AREA_END));

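        /*
         * Leave the early stage: from here on, populating the shadow
         * region allocates real zeroed pages instead of aliasing the
         * shared early shadow page.
         */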
        kasan_early_stage = false;

        /* Populate the linear mapping */
        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)phys_to_virt(pa_start);
                void *end   = (void *)phys_to_virt(pa_end);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                        (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
        }

        /* Populate modules mapping */
        kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
                (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
        /*
         * KASAN may reuse the contents of kasan_early_shadow_pte directly,
         * so we should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
        local_flush_tlb_all();

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized.\n");
}