// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the setup_arch() code, which handles the architecture-dependent
 * parts of early kernel initialization.
 */
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>
#include <linux/random.h>

#include <uapi/linux/mount.h>
#include <asm/bios_ebda.h>
#include <asm/cacheinfo.h>
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/kasan.h>
#include <asm/kaslr.h>
#include <asm/memtype.h>
#include <asm/realmode.h>
#include <asm/olpc_ofw.h>
#include <asm/pci-direct.h>
#include <asm/proto.h>
#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
/*
 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
 * max_pfn_mapped:     highest directly mapped pfn > 4 GB
 *
 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and
 * gaps are represented by pfn_mapped[].
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;
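
/*
 * Illustrative sketch (not part of the boot flow): how the pfn bookkeeping
 * above is typically consumed. The helper name and its use are hypothetical;
 * pfn_range_is_mapped() and PFN_DOWN() are the real interfaces used later in
 * this file.
 */
static bool __init __maybe_unused example_phys_range_is_mapped(u64 start, u64 end)
{
        /*
         * Convert byte addresses to page frame numbers, then consult the
         * pfn_mapped[] ranges maintained by the direct-mapping code.
         */
        return pfn_range_is_mapped(PFN_DOWN(start), PFN_DOWN(end));
}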
RESERVE_BRK(dmi_alloc, 65536);

unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end   = (unsigned long)__brk_base;

struct boot_params boot_params;
/*
 * These are the four main kernel memory regions; we put them into
 * the resource tree so that kdump tools and other debugging tools
 * recognize them as kernel data ranges and leave them alone.
 */

static struct resource rodata_resource = {
        .name   = "Kernel rodata",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
        .name   = "Kernel bss",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
#ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head_32.S */
struct cpuinfo_x86 new_cpu_data;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
        defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif
#ifdef CONFIG_IMA
static phys_addr_t ima_kexec_buffer_phys;
static size_t ima_kexec_buffer_size;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void __init copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif
void * __init extend_brk(size_t size, size_t align)
{
        size_t mask = align - 1;
        void *ret;

        BUG_ON(_brk_start == 0);
        BUG_ON(align & mask);

        _brk_end = (_brk_end + mask) & ~mask;
        BUG_ON((char *)(_brk_end + size) > __brk_limit);

        ret = (void *)_brk_end;
        _brk_end += size;

        memset(ret, 0, size);

        return ret;
}
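
/*
 * Illustrative sketch (hypothetical helper, not used by the boot path):
 * carving an aligned scratch buffer out of the brk area. With align == 256
 * the mask is 0xff, so _brk_end is first rounded up to the next 256-byte
 * boundary before the 512 bytes are handed out. Callers must run before
 * reserve_brk() sets _brk_start to 0.
 */
static void __init __maybe_unused example_brk_user(void)
{
        /* 512 bytes, 256-byte aligned; the memory is pre-zeroed. */
        u8 *scratch = extend_brk(512, 256);

        scratch[0] = 0xff;      /* scribble to show the buffer is usable */
}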
#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif
static void __init reserve_brk(void)
{
        if (_brk_end > _brk_start)
                memblock_reserve(__pa_symbol(_brk_start),
                                 _brk_end - _brk_start);

        /*
         * Mark the brk area as locked down and no longer taking any
         * new allocations.
         */
        _brk_start = 0;
}
u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD
static u64 __init get_ramdisk_image(void)
{
        u64 ramdisk_image = boot_params.hdr.ramdisk_image;

        ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

        if (ramdisk_image == 0)
                ramdisk_image = phys_initrd_start;

        return ramdisk_image;
}

static u64 __init get_ramdisk_size(void)
{
        u64 ramdisk_size = boot_params.hdr.ramdisk_size;

        ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

        if (ramdisk_size == 0)
                ramdisk_size = phys_initrd_size;

        return ramdisk_size;
}
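
/*
 * Worked example (hypothetical values): a bootloader that loads the initrd
 * above 4 GiB might set hdr.ramdisk_image = 0x37000000 and
 * ext_ramdisk_image = 0x1. The composition above then yields
 * (0x1ULL << 32) | 0x37000000 == 0x137000000, i.e. the boot protocol splits
 * a 64-bit physical address across the legacy 32-bit field and its
 * extension in struct boot_params.
 */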
static void __init relocate_initrd(void)
{
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 area_size     = PAGE_ALIGN(ramdisk_size);

        /* We need to move the initrd down into directly mapped mem */
        relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
                                                      PFN_PHYS(max_pfn_mapped));
        if (!relocated_ramdisk)
                panic("Cannot find place for new RAMDISK of size %lld\n",
                      ramdisk_size);

        initrd_start = relocated_ramdisk + PAGE_OFFSET;
        initrd_end   = initrd_start + ramdisk_size;
        printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
               relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

        copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

        printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
                " [mem %#010llx-%#010llx]\n",
                ramdisk_image, ramdisk_image + ramdisk_size - 1,
                relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
static void __init early_reserve_initrd(void)
{
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

        if (!boot_params.hdr.type_of_loader ||
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */

        memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
static void __init reserve_initrd(void)
{
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

        if (!boot_params.hdr.type_of_loader ||
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */

        initrd_start = 0;

        printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
               ramdisk_end - 1);

        if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
                                PFN_DOWN(ramdisk_end))) {
                /* All are mapped, easy case */
                initrd_start = ramdisk_image + PAGE_OFFSET;
                initrd_end = initrd_start + ramdisk_size;
        } else {
                relocate_initrd();
        }

        memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
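
/*
 * Note on the conversion above: for memory covered by the direct map,
 * "phys + PAGE_OFFSET" is the same translation that __va() performs. A
 * minimal illustration (hypothetical helper, not used by the boot path):
 */
static void * __init __maybe_unused example_initrd_virt(u64 phys)
{
        /* Only valid if the range is covered by pfn_mapped[]. */
        return __va(phys);
}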
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init add_early_ima_buffer(u64 phys_addr)
{
#ifdef CONFIG_IMA
        struct ima_setup_data *data;

        data = early_memremap(phys_addr + sizeof(struct setup_data), sizeof(*data));
        if (!data) {
                pr_warn("setup: failed to memremap ima_setup_data entry\n");
                return;
        }
        if (data->size) {
                memblock_reserve(data->addr, data->size);
                ima_kexec_buffer_phys = data->addr;
                ima_kexec_buffer_size = data->size;
        }
        early_memunmap(data, sizeof(*data));
#else
        pr_warn("Passed IMA kexec data, but CONFIG_IMA not set. Ignoring.\n");
#endif
}
#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
int __init ima_free_kexec_buffer(void)
{
        int rc;

        if (!ima_kexec_buffer_size)
                return -ENOENT;

        rc = memblock_phys_free(ima_kexec_buffer_phys,
                                ima_kexec_buffer_size);
        if (rc)
                return rc;

        ima_kexec_buffer_phys = 0;
        ima_kexec_buffer_size = 0;

        return 0;
}

int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
        if (!ima_kexec_buffer_size)
                return -ENOENT;

        *addr = __va(ima_kexec_buffer_phys);
        *size = ima_kexec_buffer_size;

        return 0;
}
#endif
static void __init parse_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data, pa_next;

        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                u32 data_len, data_type;

                data = early_memremap(pa_data, sizeof(*data));
                data_len = data->len + sizeof(struct setup_data);
                data_type = data->type;
                pa_next = data->next;
                early_memunmap(data, sizeof(*data));

                switch (data_type) {
                case SETUP_E820_EXT:
                        e820__memory_setup_extended(pa_data, data_len);
                        break;
                case SETUP_EFI:
                        parse_efi_setup(pa_data, data_len);
                        break;
                case SETUP_IMA:
                        add_early_ima_buffer(pa_data);
                        break;
                case SETUP_RNG_SEED:
                        data = early_memremap(pa_data, data_len);
                        add_bootloader_randomness(data->data, data->len);
                        /* Zero seed for forward secrecy. */
                        memzero_explicit(data->data, data->len);
                        /* Zero length in case we find ourselves back here by accident. */
                        memzero_explicit(&data->len, sizeof(data->len));
                        early_memunmap(data, data_len);
                        break;
                default:
                        break;
                }
                pa_data = pa_next;
        }
}
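
/*
 * Illustrative sketch (hypothetical helper): the setup_data entries form a
 * singly linked list in physical memory, so walking it follows the same
 * early_memremap()/early_memunmap() pattern as parse_setup_data() above.
 */
static int __init __maybe_unused example_count_setup_data(void)
{
        u64 pa = boot_params.hdr.setup_data;
        int n = 0;

        while (pa) {
                struct setup_data *data = early_memremap(pa, sizeof(*data));

                if (!data)
                        break;
                pa = data->next;        /* physical address of the next node */
                early_memunmap(data, sizeof(*data));
                n++;
        }
        return n;
}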
static void __init memblock_x86_reserve_range_setup_data(void)
{
        struct setup_indirect *indirect;
        struct setup_data *data;
        u64 pa_data, pa_next;
        u32 len;

        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
                if (!data) {
                        pr_warn("setup: failed to memremap setup_data entry\n");
                        return;
                }

                len = sizeof(*data);
                pa_next = data->next;

                memblock_reserve(pa_data, sizeof(*data) + data->len);

                if (data->type == SETUP_INDIRECT) {
                        len += data->len;
                        early_memunmap(data, sizeof(*data));
                        data = early_memremap(pa_data, len);
                        if (!data) {
                                pr_warn("setup: failed to memremap indirect setup_data\n");
                                return;
                        }

                        indirect = (struct setup_indirect *)data->data;

                        if (indirect->type != SETUP_INDIRECT)
                                memblock_reserve(indirect->addr, indirect->len);
                }

                pa_data = pa_next;
                early_memunmap(data, len);
        }
}
/*
 * --------- Crashkernel reservation ------------------------------
 */

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN             SZ_16M

/*
 * Keep the crash kernel below this limit.
 *
 * Earlier 32-bit kernels would limit the kernel to the low 512 MB range
 * due to mapping restrictions.
 *
 * 64-bit kdump kernels need to be restricted to be under 64 TB, which is
 * the upper limit of system RAM in 4-level paging mode. Since the kdump
 * jump could be from 5-level paging to 4-level paging, the jump will fail if
 * the kernel is put above 64 TB, and during the 1st kernel bootup there's
 * no good way to detect the paging mode of the target kernel which will be
 * loaded for dumping.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX     SZ_512M
# define CRASH_ADDR_HIGH_MAX    SZ_512M
#else
# define CRASH_ADDR_LOW_MAX     SZ_4G
# define CRASH_ADDR_HIGH_MAX    SZ_64T
#endif
static int __init reserve_crashkernel_low(void)
{
        unsigned long long base, low_base = 0, low_size = 0;
        unsigned long low_mem_limit;
        int ret;

        low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);

        /* crashkernel=Y,low */
        ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
        if (ret) {
                /*
                 * Two parts from kernel/dma/swiotlb.c:
                 * - swiotlb size: user-specified with swiotlb= or default.
                 *
                 * - swiotlb overflow buffer: now hardcoded to 32k. We round it
                 *   to 8M for other buffers that may need to stay low too. Also
                 *   make sure we allocate enough extra low memory so that we
                 *   don't run out of DMA buffers for 32-bit devices.
                 */
                low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
        } else {
                /* Passed with crashkernel=0,low ? */
                if (!low_size)
                        return 0;
        }

        low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
        if (!low_base) {
                pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
                       (unsigned long)(low_size >> 20));
                return -ENOMEM;
        }

        pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
                (unsigned long)(low_size >> 20),
                (unsigned long)(low_base >> 20),
                (unsigned long)(low_mem_limit >> 20));

        crashk_low_res.start = low_base;
        crashk_low_res.end   = low_base + low_size - 1;
        insert_resource(&iomem_resource, &crashk_low_res);

        return 0;
}
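
/*
 * Worked example for the default sizing above (assuming the usual 64 MiB
 * swiotlb default from kernel/dma/swiotlb.c, i.e. no swiotlb= override):
 * low_size = max(64M + 8M, 256M) = 256 MiB. Only a swiotlb override larger
 * than 248 MiB would push the low reservation past 256 MiB.
 */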
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base, total_mem;
        bool high = false;
        int ret;

        if (!IS_ENABLED(CONFIG_KEXEC_CORE))
                return;

        total_mem = memblock_phys_mem_size();

        /* crashkernel=XM */
        ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0) {
                /* crashkernel=X,high */
                ret = parse_crashkernel_high(boot_command_line, total_mem,
                                             &crash_size, &crash_base);
                if (ret != 0 || crash_size <= 0)
                        return;
                high = true;
        }

        if (xen_pv_domain()) {
                pr_info("Ignoring crashkernel for a Xen PV domain\n");
                return;
        }

        /* 0 means: find the address automatically */
        if (!crash_base) {
                /*
                 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
                 * crashkernel=x,high reserves memory over 4G, also allocates
                 * 256M extra low memory for DMA buffers and swiotlb.
                 * But the extra memory is not required for all machines.
                 * So try low memory first and fall back to high memory
                 * unless "crashkernel=size[KMG],high" is specified.
                 */
                if (!high)
                        crash_base = memblock_phys_alloc_range(crash_size,
                                                CRASH_ALIGN, CRASH_ALIGN,
                                                CRASH_ADDR_LOW_MAX);
                if (!crash_base)
                        crash_base = memblock_phys_alloc_range(crash_size,
                                                CRASH_ALIGN, CRASH_ALIGN,
                                                CRASH_ADDR_HIGH_MAX);
                if (!crash_base) {
                        pr_info("crashkernel reservation failed - No suitable area found.\n");
                        return;
                }
        } else {
                unsigned long long start;

                start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
                                                  crash_base + crash_size);
                if (start != crash_base) {
                        pr_info("crashkernel reservation failed - memory is in use.\n");
                        return;
                }
        }

        if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
                memblock_phys_free(crash_base, crash_size);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end   = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
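
/*
 * Example walk-through (hypothetical command line): with "crashkernel=256M"
 * and no explicit base, the code above first tries a 16 MiB-aligned 256 MiB
 * block below CRASH_ADDR_LOW_MAX (4 GiB on 64-bit) and only falls back to
 * the high range if that fails. "crashkernel=256M,high" skips the low
 * attempt, and reserve_crashkernel_low() then adds the extra low-memory
 * reservation for DMA buffers and swiotlb.
 */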
static struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x60,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x64, .end = 0x64,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
void __init reserve_standard_io_resources(void)
{
        int i;

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
}
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
        int i;
        u16 vendor, devid;
        static const __initconst u16 snb_ids[] = {
                0x0102,
                0x0112,
                0x0122,
                0x0106,
                0x0116,
                0x0126,
                0x010a,
        };

        /* Assume no if something weird is going on with PCI */
        if (!early_pci_allowed())
                return false;

        vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
        if (vendor != 0x8086)
                return false;

        devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
        for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
                if (devid == snb_ids[i])
                        return true;
#endif

        return false;
}
/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
        static const __initconst unsigned long bad_pages[] = {
                0x20050000,
                0x20110000,
                0x20130000,
                0x20138000,
                0x40004000,
        };
        int i;

        if (!snb_gfx_workaround_needed())
                return;

        printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

        /*
         * SandyBridge integrated graphics devices have a bug that prevents
         * them from accessing certain memory ranges, namely anything below
         * 1M and in the pages listed in bad_pages[] above.
         *
         * To avoid these pages ever being accessed by SNB gfx devices,
         * reserve bad_pages that have not already been reserved at boot time.
         * All memory below the 1 MB mark is anyway reserved later during
         * setup_arch(), so there is no need to reserve it here.
         */
        for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
                if (memblock_reserve(bad_pages[i], PAGE_SIZE))
                        printk(KERN_WARNING "failed to reserve 0x%08lx\n",
                               bad_pages[i]);
        }
}
static void __init trim_bios_range(void)
{
        /*
         * The first 4Kb of memory is a special case: it is a BIOS-owned
         * area, not kernel RAM, but generally not listed as such in the
         * E820 table.
         *
         * This typically reserves additional memory (64KiB by default)
         * since some BIOSes are known to corrupt low memory. See the
         * Kconfig help text for X86_RESERVE_LOW.
         */
        e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);

        /*
         * Special case: some BIOSes report the PC BIOS
         * area (640Kb -> 1Mb) as RAM even though it is not.
         * Take them out.
         */
        e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);

        e820__update_table(e820_table);
}
/* Called before trim_bios_range() to spare an extra sanitize pass */
static void __init e820_add_kernel_range(void)
{
        u64 start = __pa_symbol(_text);
        u64 size = __pa_symbol(_end) - start;

        /*
         * Complain if .text, .data and .bss are not marked as E820_TYPE_RAM
         * and attempt to fix it by adding the range. We may have a confused
         * BIOS, or the user may have used memmap=exactmap or memmap=xxM$yyM
         * to exclude the kernel range. If we really are running on top of
         * non-RAM, we will crash later anyway.
         */
        if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
                return;

        pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
        e820__range_remove(start, size, E820_TYPE_RAM, 0);
        e820__range_add(start, size, E820_TYPE_RAM);
}
static void __init early_reserve_memory(void)
{
        /*
         * Reserve the memory occupied by the kernel between _text and
         * __end_of_kernel_reserve symbols. Any kernel sections after the
         * __end_of_kernel_reserve symbol must be explicitly reserved with a
         * separate memblock_reserve() or they will be discarded.
         */
        memblock_reserve(__pa_symbol(_text),
                         (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);

        /*
         * The first 4Kb of memory is a BIOS owned area, but generally it is
         * not listed as such in the E820 table.
         *
         * Reserve the first 64K of memory since some BIOSes are known to
         * corrupt low memory. After the real mode trampoline is allocated the
         * rest of the memory below 640k is reserved.
         *
         * In addition, make sure page 0 is always reserved because on
         * systems with L1TF its contents can be leaked to user processes.
         */
        memblock_reserve(0, SZ_64K);

        early_reserve_initrd();

        memblock_x86_reserve_range_setup_data();

        reserve_bios_regions();
        trim_snb_memory();
}
/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
        if (kaslr_enabled()) {
                pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
                         kaslr_offset(),
                         __START_KERNEL,
                         __START_KERNEL_map,
                         MODULES_VADDR - 1);
        } else {
                pr_emerg("Kernel Offset: disabled\n");
        }

        return 0;
}
void x86_configure_nx(void)
{
        if (boot_cpu_has(X86_FEATURE_NX))
                __supported_pte_mask |= _PAGE_NX;
        else
                __supported_pte_mask &= ~_PAGE_NX;
}
static void __init x86_report_nx(void)
{
        if (!boot_cpu_has(X86_FEATURE_NX)) {
                printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
                       "missing in CPU!\n");
        } else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#else
                /* 32bit non-PAE kernel, NX cannot be used */
                printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
                       "cannot be enabled: non-PAE kernel!\n");
#endif
        }
}
/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        /*
         * copy kernel address range established so far and switch
         * to the proper swapper page table
         */
        clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        initial_page_table + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);

        load_cr3(swapper_pg_dir);

        /*
         * Note: Quark X1000 CPUs advertise PGE incorrectly and require
         * a cr3 based tlb flush, so the following __flush_tlb_all()
         * will not flush anything because the CPU quirk which clears
         * X86_FEATURE_PGE has not been invoked yet. Though due to the
         * load_cr3() above the TLB has been flushed already. The
         * quirk is invoked before subsequent calls to __flush_tlb_all()
         * so proper operation is guaranteed.
         */
        __flush_tlb_all();
#else
        printk(KERN_INFO "Command line: %s\n", boot_command_line);
        boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif
        /*
         * If we have OLPC OFW, we might end up relocating the fixmap due to
         * reserve_top(), so do this before touching the ioremap area.
         */
        olpc_ofw_detect();

        idt_setup_early_traps();
        early_cpu_init();
        jump_label_init();
        static_call_init();
        early_ioremap_init();

        setup_olpc_ofw_pgd();
        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
        apm_info.bios = boot_params.apm_bios_info;
        ist_info = boot_params.ist_info;
#endif
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;
        if ((bootloader_type >> 4) == 0xe) {
                bootloader_type &= 0xf;
                bootloader_type |= (boot_params.hdr.ext_loader_type + 0x10) << 4;
        }
        bootloader_version  = bootloader_type & 0xf;
        bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
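
        /*
         * Worked example (hypothetical value): type_of_loader == 0x72 means
         * loader ID 7 (high nibble) with version 2 (low nibble), so
         * bootloader_type stays 0x72 and bootloader_version becomes 2
         * (possibly extended by ext_loader_ver << 4). The 0xe ID is the
         * escape for extended loader IDs taken from ext_loader_type.
         */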
#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     EFI32_LOADER_SIGNATURE, 4)) {
                set_bit(EFI_BOOT, &efi.flags);
        } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     EFI64_LOADER_SIGNATURE, 4)) {
                set_bit(EFI_BOOT, &efi.flags);
                set_bit(EFI_64BIT, &efi.flags);
        }
#endif
        x86_init.oem.arch_setup();

        /*
         * Do some memory reservations *before* memory is added to memblock, so
         * memblock allocations won't overwrite it.
         *
         * After this point, everything still needed from the boot loader or
         * firmware or kernel text should be early reserved or marked not RAM
         * in e820. All other memory is free game.
         *
         * This call needs to happen before e820__memory_setup(), which calls
         * xen_memory_setup() on Xen dom0 and relies on those early
         * reservations having happened already.
         */
        early_reserve_memory();

        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        e820__memory_setup();
        parse_setup_data();

        copy_edd();
        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;

        setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);

        code_resource.start = __pa_symbol(_text);
        code_resource.end = __pa_symbol(_etext) - 1;
        rodata_resource.start = __pa_symbol(__start_rodata);
        rodata_resource.end = __pa_symbol(__end_rodata) - 1;
        data_resource.start = __pa_symbol(_sdata);
        data_resource.end = __pa_symbol(_edata) - 1;
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop) - 1;
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
        strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if (builtin_cmdline[0]) {
                /* Append the boot loader cmdline to the builtin one */
                strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
                strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
                strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif
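
        /*
         * Example (hypothetical strings): with CONFIG_CMDLINE="console=ttyS0"
         * and a bootloader-provided "root=/dev/sda1", the resulting
         * boot_command_line is "console=ttyS0 root=/dev/sda1", unless
         * CONFIG_CMDLINE_OVERRIDE discards the bootloader part entirely.
         */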
        strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
        /*
         * x86_configure_nx() is called before parse_early_param() to detect
         * whether hardware doesn't support NX (so that the early EHCI debug
         * console setup can safely call set_fixmap()). This needs to be done
         * before parse_early_param().
         */
        x86_configure_nx();

        parse_early_param();

        if (efi_enabled(EFI_BOOT))
                efi_memblock_x86_reserve_range();
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
         * cannot migrate the kernel pages. When memory hotplug is
         * enabled, we should prevent memblock from allocating memory
         * for the kernel.
         *
         * ACPI SRAT records all hotpluggable memory ranges. But before
         * SRAT is parsed, we don't know about it.
         *
         * The kernel image is loaded into memory very early. We cannot
         * prevent this anyway. So on NUMA systems, we set any node the
         * kernel resides in as un-hotpluggable.
         *
         * Since on modern servers one node could have double-digit
         * gigabytes of memory, we can assume the memory around the kernel
         * image is also un-hotpluggable. So before SRAT is parsed, just
         * allocate memory near the kernel image to do our best to keep
         * the kernel away from hotpluggable memory.
         */
        if (movable_node_is_enabled())
                memblock_set_bottom_up(true);
#endif
        x86_report_nx();

        apic_setup_apic_calls();

        if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
                apic_is_disabled = true;
#endif
                setup_clear_cpu_cap(X86_FEATURE_APIC);
        }

        e820__reserve_setup_data();
        e820__finish_early_params();
        if (efi_enabled(EFI_BOOT))
                efi_init();

        reserve_ibft_region();
        dmi_setup();
        /*
         * VMware detection requires dmi to be available, so this
         * needs to be done after dmi_setup(), for the boot CPU.
         * For some guest types (Xen PV, SEV-SNP, TDX) it is required to be
         * called before cache_bp_init() for setting up MTRR state.
         */
        init_hypervisor_platform();

        tsc_early_init();
        cache_bp_init();

        x86_init.resources.probe_roms();
        /* After parse_early_param(), so we can debug these */
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &rodata_resource);
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);

        e820_add_kernel_range();
#ifdef CONFIG_X86_32
        if (ppro_with_ram_bug()) {
                e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
                                   E820_TYPE_RESERVED);
                e820__update_table(e820_table);
                printk(KERN_INFO "fixed physical RAM map:\n");
                e820__print_table("bad_ppro");
        }
#else
        early_gart_iommu_check();
#endif
        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        max_pfn = e820__end_of_ram_pfn();

        /* Update e820 for memory not covered by WB MTRRs */
        if (mtrr_trim_uncached_memory(max_pfn))
                max_pfn = e820__end_of_ram_pfn();
        max_possible_pfn = max_pfn;

        /*
         * Define random base addresses for memory sections after max_pfn is
         * defined and before each memory section base is used.
         */
        kernel_randomize_memory();
#ifdef CONFIG_X86_32
        /* max_low_pfn gets updated here */
        find_low_pfn_range();
#else
        check_x2apic();

        /* How many end-of-memory variables you have, grandma! */
        /* need this before calling reserve_initrd */
        if (max_pfn > (1UL << (32 - PAGE_SHIFT)))
                max_low_pfn = e820__end_of_low_ram_pfn();
        else
                max_low_pfn = max_pfn;

        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();

        early_alloc_pgt_buf();

        /*
         * Need to conclude brk before e820__memblock_setup(); it could use
         * memblock_find_in_range(), which could overlap with the brk area.
         */
        reserve_brk();

        cleanup_highmap();

        memblock_set_current_limit(ISA_END_ADDRESS);
        e820__memblock_setup();
        /*
         * Needs to run after memblock setup because it needs the physical
         * memory size.
         */
        sev_setup_arch();

        efi_mokvar_table_init();

        /*
         * The EFI specification says that boot service code won't be
         * called after ExitBootServices(). This is, in fact, a lie.
         */
        efi_reserve_boot_services();
        /* preallocate 4k for mptable mpc */
        e820__memblock_alloc_reserved_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
        setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
        printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
               (max_pfn_mapped << PAGE_SHIFT) - 1);
#endif
        /*
         * Find free memory for the real mode trampoline and place it there. If
         * there is not enough free memory under 1M, on EFI-enabled systems
         * there will be an additional attempt to reclaim the memory for the
         * real mode trampoline at efi_free_boot_services().
         *
         * Unconditionally reserve the entire first 1M of RAM because BIOSes
         * are known to corrupt low memory and several hundred kilobytes are
         * not worth complex detection of what memory gets clobbered. Windows
         * does the same thing for very similar reasons.
         *
         * Moreover, on machines with SandyBridge graphics or in setups that
         * use crashkernel the entire 1M is reserved anyway.
         */
        x86_platform.realmode_reserve();

        init_mem_mapping();

        idt_setup_early_pf();
        /*
         * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
         * with the current CR4 value. This may not be necessary, but
         * auditing all the early-boot CR4 manipulation would be needed to
         * rule it out.
         *
         * Mask off features that don't work outside long mode (just
         * PCIDE for now).
         */
        mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;
        memblock_set_current_limit(get_max_mapped());

        /*
         * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
         */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        if (init_ohci1394_dma_early)
                init_ohci1394_dma_on_all_controllers();
#endif
        /* Allocate bigger log buffer */
        setup_log_buf(1);

        if (efi_enabled(EFI_BOOT)) {
                switch (boot_params.secure_boot) {
                case efi_secureboot_mode_disabled:
                        pr_info("Secure boot disabled\n");
                        break;
                case efi_secureboot_mode_enabled:
                        pr_info("Secure boot enabled\n");
                        break;
                default:
                        pr_info("Secure boot could not be determined\n");
                        break;
                }
        }
        reserve_initrd();

        acpi_table_upgrade();
        /* Look for ACPI tables and reserve memory occupied by them. */
        acpi_boot_table_init();

        early_platform_quirks();

        early_acpi_boot_init();

        dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

        if (boot_cpu_has(X86_FEATURE_GBPAGES))
                hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
        /*
         * Reserve memory for crash kernel after SRAT is parsed so that it
         * won't consume hotpluggable memory.
         */
        reserve_crashkernel();

        memblock_find_dma_reserve();

        if (!early_xdbc_setup_hardware())
                early_xdbc_register_console();
        x86_init.paging.pagetable_init();

        kasan_init();

        /*
         * Sync back kernel address range.
         *
         * FIXME: Can the later sync in setup_cpu_entry_areas() replace
         * this call?
         */
        sync_initial_page_table();
        x86_32_probe_apic();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

        /*
         * get boot-time SMP configuration:
         */
        get_smp_config();

        /*
         * Systems w/o ACPI and mptables might not have the local APIC
         * mapped yet, but prefill_possible_map() might need to access it.
         */
        init_apic_mappings();

        prefill_possible_map();

        io_apic_init_mappings();
        x86_init.hyper.guest_late_init();

        e820__reserve_resources();
        e820__register_nosave_regions(max_pfn);

        x86_init.resources.reserve_resources();

        e820__setup_pci_gap();

#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#endif
        x86_init.oem.banner();

        x86_init.timers.wallclock_init();

        /*
         * This needs to run before setup_local_APIC() which soft-disables the
         * local APIC temporarily and that masks the thermal LVT interrupt,
         * leading to softlockups on machines which have configured SMI
         * interrupt delivery.
         */
        therm_lvt_init();

        register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
        if (efi_enabled(EFI_BOOT))
                efi_apply_memmap_quirks();
#endif

        unwind_init();
}
#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
        .name   = "Video RAM area",
        .start  = 0xa0000,
        .end    = 0xbffff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
        request_resource(&iomem_resource, &video_ram_resource);
        reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */
static struct notifier_block kernel_offset_notifier = {
        .notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &kernel_offset_notifier);
        return 0;
}
__initcall(register_kernel_offset_dumper);