// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/panic_notifier.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/scs.h>
#include <linux/mm.h>
#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/scs.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
static int num_standard_resources;
static struct resource *standard_resources;

phys_addr_t __fdt_pointer __initdata;
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];
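/*
 * Per the arm64 boot protocol (Documentation/arm64/booting.rst), x0
 * holds the physical address of the device tree blob and x1..x3 must
 * be zero. The saved x1..x3 values are sanity-checked at the end of
 * setup_arch().
 */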
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	set_cpu_logical_map(0, mpidr);

	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}
struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * contiguous set of indices. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}
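/*
 * Illustrative only: a C sketch of how the precomputed shifts fold a
 * masked MPIDR_EL1 value into a linear index. The kernel performs the
 * equivalent computation in assembly on the resume path
 * (arch/arm64/kernel/sleep.S); no such C helper exists in the tree:
 *
 *	u64 m = mpidr & mpidr_hash.mask;
 *	u32 index = ((m & (0xffULL <<  0)) >> mpidr_hash.shift_aff[0]) |
 *		    ((m & (0xffULL <<  8)) >> mpidr_hash.shift_aff[1]) |
 *		    ((m & (0xffULL << 16)) >> mpidr_hash.shift_aff[2]) |
 *		    ((m & (0xffULL << 32)) >> mpidr_hash.shift_aff[3]);
 *
 * index stays below (1 << mpidr_hash.bits) and is unique per CPU.
 */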
static void *early_fdt_ptr __initdata;

void __init *get_early_fdt_ptr(void)
{
	return early_fdt_ptr;
}

asmlinkage void __init early_fdt_map(u64 dt_phys)
{
	int fdt_size;

	early_fixmap_init();
	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;

	if (dt_virt)
		memblock_reserve(dt_phys, size);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		/*
		 * Note that in this _really_ early stage we cannot even BUG()
		 * or oops, so the least terrible thing to do is cpu_relax(),
		 * or else we could end up printing non-initialized data, etc.
		 */
		while (true)
			cpu_relax();
	}

	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start   = __pa_symbol(_stext);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);
	insert_resource(&iomem_resource, &kernel_code);
	insert_resource(&iomem_resource, &kernel_data);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
			res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		}

		insert_resource(&iomem_resource, res);
	}
}
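/*
 * The net effect is visible in /proc/iomem; for example, with
 * hypothetical addresses:
 *
 *	40000000-bfffffff : System RAM
 *	  40210000-412affff : Kernel code
 *	  41600000-418fffff : Kernel data
 */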
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);
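/*
 * Worked example of the clamping above (assuming 4 KiB pages): a
 * reserved range [0x80001234, 0x80002345) is first widened to page
 * boundaries, giving 0x80001000-0x80002fff inclusive, and then clamped
 * against the resource, so only the part overlapping that "System RAM"
 * entry is split out as "reserved".
 */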
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

u64 cpu_logical_map(unsigned int cpu)
{
	return __cpu_logical_map[cpu];
}
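/*
 * Entries start out as INVALID_HWID and are filled with MPIDR values as
 * CPUs are enumerated from the device tree or ACPI (see smp_init_cpus());
 * the boot CPU's slot is populated early in smp_setup_processor_id().
 */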
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	/*
	 * If we know now that we are going to need KPTI then use non-global
	 * mappings from the start, avoiding the cost of rewriting
	 * everything later.
	 */
	arm64_use_ng_mappings = kaslr_requires_kpti();

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
	 * Unmask asynchronous aborts and fiq after bringing up possible
	 * earlycon. (Report possible System Errors once we can report this
	 * occurrence).
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();
	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
		pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");

	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	init_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after cpus are set up. */
	kasan_init_sw_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}
static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
#endif
	return false;
}
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = cpu_can_disable(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);
static void dump_kernel_offset(void)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
}
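/*
 * Illustrative panic output when KASLR is active (all values are
 * hypothetical):
 *
 *	Kernel Offset: 0x2a3b40000 from 0xffff800010000000
 *	PHYS_OFFSET: 0x40000000
 *
 * or "Kernel Offset: disabled" when the image was not randomized.
 */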
static int arm64_panic_block_dump(struct notifier_block *self,
				  unsigned long v, void *p)
{
	dump_kernel_offset();

	return 0;
}

static struct notifier_block arm64_panic_block = {
	.notifier_call = arm64_panic_block_dump
};

static int __init register_arm64_panic_block(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &arm64_panic_block);
	return 0;
}
device_initcall(register_arm64_panic_block);