arch/s390/boot/vmem.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

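/*
 * This code runs in the decompressor, before the decompressed kernel's
 * symbols can be referenced directly, so objects that live in vmlinux
 * are reached through the offsets recorded in the vmlinux descriptor.
 */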
#define init_mm			(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir		vmlinux.swapper_pg_dir_off
#define invalid_pg_dir		vmlinux.invalid_pg_dir_off

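/*
 * A populate_mode selects what backs the mappings that pgtable_populate()
 * creates: nothing (only the page tables themselves are built), a 1:1
 * identity mapping, the absolute lowcore area, or one of the KASAN
 * shadow variants handled below.
 */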
enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

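/*
 * KASAN tracks each 8 bytes of memory with one shadow byte (generically,
 * shadow = (addr >> 3) + KASAN_SHADOW_OFFSET), so a shadow range is 1/8
 * the size of the range it covers; __sha() does the address conversion.
 */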
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

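/*
 * Seed the early shadow tables: each level points to the early shadow
 * table one level below, and the pte level to the single read-only
 * kasan zero page, so large untracked ranges can all share one
 * physical page.
 */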
static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   |    kasan       |
	 * |                 |    /    |  zero page     |
	 * +- vmalloc area  -+   /     |   mapping      |
	 * | vmalloc_size    |  /      | (untracked)    |
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        |    unmapped    | allocated per module
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   | kasan zero page| (untracked)
	 * |                 |    /    | mapping        |
	 * +- vmalloc area  -+   /     +----------------+
	 * | vmalloc_size    |  /      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        |shallow populate|
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */

	for_each_physmem_usable_range(i, &start, &end)
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

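/*
 * For POPULATE_KASAN_ZERO_SHADOW, a range that fully covers an aligned
 * upper-level region is mapped by pointing that level's entry at the
 * shared early shadow table, so no lower-level tables are allocated.
 */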
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}
#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() in the absence of the init_mm symbol;
 * unlike the original, the pmd_none() check is skipped.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

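/*
 * Allocate and initialize a region or segment table (CRST), which
 * spans PAGE_SIZE << CRST_ALLOC_ORDER bytes (16K, i.e. four pages,
 * on s390).
 */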
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}

static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * A page table occupies only half a page (_PAGE_TABLE_SIZE), so
	 * hand out the two halves of each allocated page in turn. This
	 * avoids memory fragmentation during POPULATE_KASAN_MAP_SHADOW
	 * when EDAT is off and many 4K page tables are needed.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

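/*
 * Return the physical address to map at addr for the given mode: the
 * address itself for the identity mapping, the translated address for
 * POPULATE_ABS_LOWCORE, or freshly allocated and zeroed memory for the
 * KASAN shadow. For POPULATE_NONE only the page tables themselves are
 * wanted, so -1 (all bits set, including the invalid bit) is returned.
 */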
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}

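/*
 * EDAT1 (enhanced DAT) provides 1M segment (pmd) pages and EDAT2
 * provides 2G region-third (pud) pages; a large mapping is only used
 * when the facility is present and the range is naturally aligned and
 * big enough.
 */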
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat2 &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat1 &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}

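/*
 * The PG_DIRECT_MAP_* counters updated below feed the direct mapping
 * statistics that the architecture exposes through /proc when
 * CONFIG_PROC_FS is enabled.
 */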
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}

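/*
 * Walk, and create via the boot allocators, the page tables for
 * [addr, end). For POPULATE_KASAN_SHALLOW only the top level is
 * populated; the lower levels are filled in later by the vmalloc and
 * module code at runtime.
 */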
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

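	/*
	 * An asce_limit of _REGION1_SIZE requires four levels of
	 * translation (a region-second top table); anything smaller gets
	 * by with three levels (a region-third top table).
	 */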
	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing, the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0, first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

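	/* CR1: primary, CR7: secondary, CR13: home address space control element */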
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}