// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm			(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir		vmlinux.swapper_pg_dir_off
#define invalid_pg_dir		vmlinux.invalid_pg_dir_off

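/*
 * A single generic page table walker builds all mappings; populate_mode
 * tells it how to back missing entries: identity mapped (DIRECT),
 * redirected to the absolute lowcore, left invalid (NONE), or, with
 * KASAN, backed by freshly allocated or shared zero shadow pages.
 */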
enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

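/*
 * Populate the shadow of [start, end): translate both ends with
 * kasan_mem_to_shadow(), page-align the result and hand it to the
 * generic page table walker.
 */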
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

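/*
 * Build the KASAN shadow for all address ranges that need one, following
 * the layout described in the diagram below.
 */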
static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long memgap_start = 0;
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
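
	/*
	 * Preinitialize the shared zero shadow tables: every entry of each
	 * level points to the table one level below, and every pte points
	 * to the single read-only kasan_early_shadow_page.
	 */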
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * |1:1 ident mapping|    /|1/8 of ident map|
	 * |                 |   / |                |
	 * +-end of ident map+  /  +----------------+
	 * | ... gap ...     | /   | kasan          |
	 * |                 |/    | zero page      |
	 * +- vmalloc area  -+     | mapping        |
	 * | vmalloc_size    |     | (untracked)    |
	 * +- modules vaddr -+     +----------------+
	 * | 2Gb             |     |    unmapped    | allocated per module
	 * +- shadow start  -+     +----------------+
	 * | 1/8 addr space  |     | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * |1:1 ident mapping|    /|1/8 of ident map|
	 * |                 |   / |                |
	 * +-end of ident map+  /  +----------------+
	 * | ... gap ...     |  /  | kasan zero page| (untracked)
	 * |                 | /   | mapping        |
	 * +- vmalloc area  -+     +----------------+
	 * | vmalloc_size    |     |shallow populate|
	 * +- modules vaddr -+     +----------------+
	 * | 2Gb             |     |shallow populate|
	 * +- shadow start  -+     +----------------+
	 * | 1/8 addr space  |     | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */

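	/* usable memory gets real shadow; diag260 memory gaps only zero shadow */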
	for_each_physmem_usable_range(i, &start, &end) {
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
		if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
			kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
		memgap_start = end;
	}
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

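/*
 * The helpers below hook into the page table walker. In
 * POPULATE_KASAN_ZERO_SHADOW mode, a fully aligned region is wired to the
 * shared zero shadow table of the next lower level and the walker is told
 * (by returning true) not to descend any further.
 */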
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}

#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

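/*
 * Allocate a region/segment table (4 pages, CRST_ALLOC_ORDER) from the
 * physmem allocator and preinitialize all entries with the given value.
 */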
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}

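/*
 * A 4K page fits two 2K page tables (_PAGE_TABLE_SIZE), so each allocated
 * page is handed out in halves, with pte_leftover remembering the half
 * that is still free.
 */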
static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * handling pte_leftovers this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

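/*
 * Resolve the physical address backing a virtual address for the given
 * populate mode; for KASAN shadow mappings this allocates and zeroes
 * fresh memory.
 */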
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}

static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat2 &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat1 &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}

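/*
 * The pgtable_*_populate() functions below walk and fill the page tables
 * top-down: pgd -> p4d -> pud -> pmd -> pte. Large PUD (2G, EDAT2) and
 * large PMD (1M, EDAT1) mappings are used where alignment and size allow;
 * for the identity mapping the per-size page counters are updated.
 */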
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}

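/*
 * Top level of the walker; in POPULATE_KASAN_SHALLOW mode only this level
 * is populated and the walk stops here.
 */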
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

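/*
 * Create the kernel page tables: the identity mapping for usable memory,
 * the (absolute) lowcore and memcpy_real areas, and the KASAN shadow;
 * then activate the new address spaces.
 */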
void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0 first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

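	/* load kernel ASCE into CR1 and CR13, the invalid user ASCE into CR7 */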
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}