arch/riscv/kernel/setup.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/of_fdt.h>
#include <linux/sched/task.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>

#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
#include <asm/efi.h>

#include "head.h"

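/*
 * Default legacy text-mode geometry (80x30, VGA-compatible). When
 * booting via EFI, the stub may overwrite these fields with the real
 * framebuffer description.
 */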
#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
struct screen_info screen_info __section(".data") = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(".sdata")
#ifdef CONFIG_XIP_KERNEL
= ATOMIC_INIT(0xC001BEEF)
#endif
;
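/*
 * Note on the XIP initializer above: pre-setting the flash copy of the
 * variable to a magic value appears to let early boot code (head.S)
 * tell the writable RAM copy apart from the read-only flash image when
 * running the lottery, since an XIP kernel cannot take the lottery
 * in-place in flash.
 */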
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * Place kernel memory regions on the resource tree so that
 * kexec-tools can retrieve them from /proc/iomem. While there,
 * also add "System RAM" regions for compatibility with other
 * archs, and the rest of the known regions for completeness.
 */
static struct resource kimage_res = { .name = "Kernel image", };
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };
#ifdef CONFIG_CRASH_DUMP
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif

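/*
 * Wrapper around insert_resource() with error reporting. Note the
 * return convention: 1 on success, a negative errno on failure, so
 * callers test for ret < 0 rather than for non-zero.
 */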
static int __init add_resource(struct resource *parent,
				struct resource *res)
{
	int ret = 0;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
			res->name, (unsigned long long) res->start);
		return ret;
	}

	return 1;
}

static int __init add_kernel_resources(void)
{
	int ret = 0;

	/*
	 * The memory region of the kernel image is contiguous and
	 * was reserved in setup_bootmem(); register it here as a
	 * resource, with the various segments of the image as
	 * child nodes.
	 */

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	kimage_res.start = code_res.start;
	kimage_res.end = bss_res.end;
	kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = add_resource(&iomem_resource, &kimage_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &code_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &rodata_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &data_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &bss_res);

	return ret;
}

static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int num_resources = 0, res_idx = 0;
	int ret = 0;

	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
	num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1;
	res_idx = num_resources - 1;
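	/*
	 * Resources are handed out from the end of the array (res_idx
	 * counts down), so any unused slots end up contiguous at the
	 * start of mem_res and can be returned to memblock with a
	 * single memblock_free() once all regions are registered.
	 */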

	mem_res_sz = num_resources * sizeof(*mem_res);
	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
	if (!mem_res)
		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);

	/*
	 * Start by adding the reserved regions; if they overlap
	 * with /memory regions, insert_resource() will take care
	 * of it later on.
	 */
	ret = add_kernel_resources();
	if (ret < 0)
		goto error;

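	/*
	 * crashk_res/crashk_low_res describe the crashkernel=
	 * reservation; exporting them is what allows kexec-tools to
	 * locate the crash kernel region through /proc/iomem.
	 */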
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.start != crashk_res.end) {
		ret = add_resource(&iomem_resource, &crashk_res);
		if (ret < 0)
			goto error;
	}
	if (crashk_low_res.start != crashk_low_res.end) {
		ret = add_resource(&iomem_resource, &crashk_low_res);
		if (ret < 0)
			goto error;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (elfcorehdr_size > 0) {
		elfcorehdr_res.start = elfcorehdr_addr;
		elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
		elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		add_resource(&iomem_resource, &elfcorehdr_res);
	}
#endif

	for_each_reserved_mem_region(region) {
		res = &mem_res[res_idx--];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			/* Re-use this pre-allocated resource */
			res_idx++;
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &mem_res[res_idx--];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Clean up any unused pre-allocated resources */
	if (res_idx >= 0)
		memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
	return;

 error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free(mem_res, mem_res_sz);
}

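/*
 * dtb_early_va is the early virtual mapping of the DTB, established
 * while the first page tables were built (before setup_arch() runs).
 */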
static void __init parse_dtb(void)
{
	/* Early scan of device tree from init memory */
	if (early_init_dt_scan(dtb_early_va)) {
		const char *name = of_flat_dt_get_machine_name();

		if (name) {
			pr_info("Machine model: %s\n", name);
			dump_stack_set_arch_desc("%s (DT)", name);
		}
	} else {
		pr_err("No DTB passed to the kernel\n");
	}

#ifdef CONFIG_CMDLINE_FORCE
	strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	pr_info("Forcing kernel command line to: %s\n", boot_command_line);
#endif
}

extern void __init init_rt_signal_env(void);

void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	early_ioremap_setup();
	sbi_init();
	jump_label_init();
	parse_early_param();

	efi_init();
	paging_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

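	/*
	 * A built-in DTB sits in init memory, which is freed later in
	 * boot, so it must be copied while being unflattened; an
	 * externally supplied DTB can be unflattened in place.
	 */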
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
	unflatten_and_copy_device_tree();
#else
	unflatten_device_tree();
#endif
	misc_mem_init();

	init_resources();

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	if (!acpi_disabled)
		acpi_init_rintc_map();

	riscv_init_cbo_blocksizes();
	riscv_fill_hwcap();
	init_rt_signal_env();
	apply_boot_alternatives();
	if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
	    riscv_isa_extension_available(NULL, ZICBOM))
		riscv_noncoherent_supported();
	riscv_set_dma_cache_alignment();
}

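/*
 * Register one struct cpu device per possible CPU so the CPUs appear
 * under /sys/devices/system/cpu along with their hotplug capability.
 */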
static int __init topology_init(void)
{
	int i, ret;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_devices, i);

		cpu->hotpluggable = cpu_has_hotplug(i);
		ret = register_cpu(cpu, i);
		if (unlikely(ret))
			pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

	return 0;
}
subsys_initcall(topology_init);

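/*
 * With STRICT_KERNEL_RWX the freed init region must become writable and
 * non-executable again in the linear map; on 64-bit the kernel also runs
 * from its own mapping distinct from the linear map, so the init text is
 * marked non-executable there as well.
 */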
void free_initmem(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
		if (IS_ENABLED(CONFIG_64BIT))
			set_kernel_memory(__init_begin, __init_end, set_memory_nx);
	}

	free_initmem_default(POISON_FREE_INITMEM);
}

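/*
 * With CONFIG_RANDOMIZE_BASE (KASLR), report the randomization offset
 * on panic so addresses in the oops can be matched against the static
 * addresses in System.map.
 */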
static int dump_kernel_offset(struct notifier_block *self,
			      unsigned long v, void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kernel_map.virt_offset,
		 KERNEL_LINK_ADDR);

	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	return 0;
}
device_initcall(register_kernel_offset_dumper);