// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"
/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     KASAN (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;
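
/*
 * The kasan_*_table() helpers below check whether a page-table entry
 * points at one of the shared early shadow tables above, i.e. whether
 * the range it covers is still backed by the zero shadow.
 */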
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	/* With fewer levels there is no dedicated early shadow p4d table. */
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}
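
/*
 * Allocate page-table memory from memblock while the slab allocator is
 * not yet available; panic on failure, since early shadow setup cannot
 * proceed without it.
 */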
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					   MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}
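
/*
 * Point every PTE in [addr, end) at the early shadow page. The mapping
 * is write-protected: the zero shadow is shared and must never be
 * written through these entries.
 */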
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}
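
/*
 * Walk the PMD range: aligned, fully covered PMDs are pointed at the
 * shared early shadow PTE table; for partial coverage a real PTE page
 * is allocated (from slab if available, otherwise from memblock) and
 * filled by zero_pte_populate().
 */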
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}
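
/*
 * Same as zero_pmd_populate(), one level up: map whole PUDs onto the
 * shared early shadow PMD table where possible, otherwise allocate a
 * real PMD page and recurse.
 */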
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}
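
/*
 * Same pattern again at the P4D level: whole P4Ds are pointed at the
 * shared early shadow PUD table, partial ones get a real PUD page and
 * are handled by zero_pud_populate().
 */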
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}
/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should be populated with
			 * pmds at this point.
			 * The [p4d,pud,pmd]_populate*() calls below are only
			 * needed for 3- and 2-level page tables, where we
			 * don't have real puds/pmds; there pgd_populate()
			 * and pud_populate() are no-ops.
			 */
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}
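
/*
 * The kasan_free_*() helpers below free a lower-level table page once
 * every entry in it is empty, then clear the entry that pointed to it
 * in the level above.
 */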
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(ptep_get(pte)))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}
static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}
static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}
static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}
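
/*
 * The kasan_remove_*_table() helpers undo kasan_populate_early_shadow():
 * entries that point at the shared early shadow tables are simply
 * cleared, while entries backed by really allocated tables are walked
 * down and their tables freed once empty.
 */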
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;
	pte_t ptent;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		ptent = ptep_get(pte);

		if (!pte_present(ptent))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}
static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}
static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}
static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}
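
/*
 * kasan_remove_zero_shadow - unmap the zero shadow backing a memory region
 * @start: start of the (unshadowed) memory range
 * @size: size of the memory range
 *
 * Both @start and @size must be aligned to KASAN_MEMORY_PER_SHADOW_PAGE,
 * i.e. the range must map onto whole shadow pages.
 */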
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}
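
/*
 * kasan_add_zero_shadow - back a memory region with the zero shadow
 * @start: start of the memory range
 * @size: size of the memory range
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * populated shadow is torn down again via kasan_remove_zero_shadow().
 */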
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}