1 // SPDX-License-Identifier: GPL-2.0-only
3 * efi.c - EFI subsystem
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
25 #include <linux/kexec.h>
26 #include <linux/platform_device.h>
27 #include <linux/random.h>
28 #include <linux/reboot.h>
29 #include <linux/slab.h>
30 #include <linux/acpi.h>
31 #include <linux/ucs2_string.h>
32 #include <linux/memblock.h>
33 #include <linux/security.h>
35 #include <asm/early_ioremap.h>
/*
 * Global EFI state. Every configuration-table address starts out as
 * EFI_INVALID_TABLE_ADDR and is filled in by efi_config_parse_tables()
 * when the firmware advertises the corresponding table.
 */
struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi = EFI_INVALID_TABLE_ADDR,
	.acpi20 = EFI_INVALID_TABLE_ADDR,
	.smbios = EFI_INVALID_TABLE_ADDR,
	.smbios3 = EFI_INVALID_TABLE_ADDR,
	.esrt = EFI_INVALID_TABLE_ADDR,
	.tpm_log = EFI_INVALID_TABLE_ADDR,
	.tpm_final_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table = EFI_INVALID_TABLE_ADDR,
/* Physical address of the LINUX_EFI_RANDOM_SEED table, if present. */
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
/* Physical address of the LINUX_EFI_MEMRESERVE table; only used at init. */
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
/* Physical address of the EFI_RT_PROPERTIES table; only used at init. */
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
/*
 * Dedicated mm for EFI runtime-services mappings, so firmware page tables
 * are kept separate from any user or kernel address space.
 */
struct mm_struct efi_mm = {
	.mm_users = ATOMIC_INIT(2),
	.mm_count = ATOMIC_INIT(1),
	.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},

/* Ordered workqueue on which all EFI runtime-service calls are serialized. */
struct workqueue_struct *efi_rts_wq;
/* Runtime services are disabled by default on PREEMPT_RT. */
static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT);

/* "noefi" on the command line disables EFI runtime services entirely. */
static int __init setup_noefi(char *arg)
	disable_runtime = true;
early_param("noefi", setup_noefi);

/* Query helper: have runtime services been disabled (config or cmdline)? */
bool efi_runtime_disabled(void)
	return disable_runtime;

/* True unless "efi=nosoftreserve" turned off EFI_MEMORY_SP soft reservation. */
bool __pure __efi_soft_reserve_enabled(void)
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
/*
 * Parse the "efi=" kernel command line option. Recognized sub-options:
 * "debug", "noruntime", "runtime" and "nosoftreserve".
 */
static int __init parse_efi_cmdline(char *str)
	/* An empty "efi=" is rejected with a warning. */
	pr_warn("need at least one option\n");

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	/* "runtime" overrides the PREEMPT_RT default and re-enables services. */
	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
early_param("efi", parse_efi_cmdline);

/* Kobject backing /sys/firmware/efi; created in efisubsys_init(). */
struct kobject *efi_kobj;
/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
	/* Emit one "NAME=0xADDR" line per table that the firmware provided. */
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	/* Arch-specific tables are appended on IA64 and x86 only. */
	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

/* Root-readable only (0400): exposes raw physical addresses. */
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
/* Reports "64" or "32" depending on the firmware's native word size. */
static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
/* Weak declarations: only defined on architectures that export these files. */
extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

/* Attributes published under /sys/firmware/efi. */
static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,

/* Weak default; architectures may override to hide selected attributes. */
umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

/*
 * Wire the generic efivars backend to the firmware's runtime variable
 * services and register it under the efi kobject. SetVariable hooks are
 * installed only when the firmware advertises SetVariable support.
 */
static int generic_ops_register(void)
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);

/* Counterpart of generic_ops_register() for the error/teardown path. */
static void generic_ops_unregister(void)
	efivars_unregister(&generic_efivars);
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;

/*
 * "efivar_ssdt=" boot parameter: remember the name of the EFI variable
 * holding an ACPI SSDT overlay. Refused when the kernel is locked down
 * (loading arbitrary ACPI tables defeats lockdown).
 */
static int __init efivar_ssdt_setup(char *str)
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	/* Name must fit in the buffer including the NUL terminator. */
	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
		pr_warn("efivar_ssdt: name too long: %s\n", str);
__setup("efivar_ssdt=", efivar_ssdt_setup);

/*
 * efivar_init() iterator: collect every variable whose UTF-8 name matches
 * the configured efivar_ssdt name into the list passed via @data.
 */
static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
				   unsigned long name_size, void *data)
	struct efivar_entry *entry;
	struct list_head *list = data;
	char utf8_name[EFIVAR_SSDT_NAME_MAX];
	/* Clamp conversion length so utf8_name can never overflow. */
	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

	ucs2_as_utf8(utf8_name, name, limit - 1);
	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	memcpy(entry->var.VariableName, name, name_size);
	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

	efivar_entry_add(entry, list);
/*
 * Read each matching EFI variable found by efivar_ssdt_iter() and load its
 * contents as an ACPI SSDT overlay table.
 */
static __init int efivar_ssdt_load(void)
	struct efivar_entry *entry, *aux;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		/* First query the size, then allocate and fetch the payload. */
		ret = efivar_entry_size(entry, &size);
			pr_err("failed to get var size\n");

		data = kmalloc(size, GFP_KERNEL);

		ret = efivar_entry_get(entry, NULL, &size, data);
			pr_err("failed to get var data\n");

		ret = acpi_load_table(data, NULL);
			pr_err("failed to load table: %d\n", ret);

/* Stub when CONFIG_EFI_CUSTOM_SSDT_OVERLAYS is disabled. */
static inline int efivar_ssdt_load(void) { return 0; }
#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

/*
 * Expose preserved EFI boot-services code/data regions as read-only blobs
 * under /sys/kernel/debug/efi, one file per memory-map segment.
 */
static void __init efi_debugfs_init(void)
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	/* Per-type counter used to number the blob files. */
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))

	for_each_efi_memory_desc(md) {
	case EFI_BOOT_SERVICES_CODE:
		snprintf(name, sizeof(name), "boot_services_code%d",
			 type_count[md->type]++);
	case EFI_BOOT_SERVICES_DATA:
		snprintf(name, sizeof(name), "boot_services_data%d",
			 type_count[md->type]++);

	/* Cap the number of exported segments at the static blob array size. */
	if (i >= EFI_DEBUGFS_MAX_BLOBS) {
		pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
			EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);

	debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
	debugfs_blob[i].data = memremap(md->phys_addr,
					debugfs_blob[i].size,
	if (!debugfs_blob[i].data)

	debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);

/* Stub when CONFIG_DEBUG_FS is disabled. */
static inline void efi_debugfs_init(void) {}
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	/* Nothing to do on non-EFI boots. */
	if (!efi_enabled(EFI_BOOT))

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
		pr_err("efi: Firmware registration failed.\n");
		destroy_workqueue(efi_rts_wq);

	/* Variable services require both GetVariable and GetNextVariableName. */
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();

		platform_device_register_simple("efivars", 0, NULL, 0);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
		pr_err("efi: Sysfs attribute export failed with error %d.\n",

	error = efi_runtime_map_init(efi_kobj);
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))

	/* Error unwind: tear down in reverse order of registration. */
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();

	kobject_put(efi_kobj);
	destroy_workqueue(efi_rts_wq);
subsys_initcall(efisubsys_init);
/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");

		pr_err_once("out_md is null.\n");

	/* Linear scan: copy out the first descriptor covering phys_addr. */
	for_each_efi_memory_desc(md) {
		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;

/* Weak no-op default; x86 overrides this to protect boot-services ranges. */
void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * like the ESRT.
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
/*
 * Vendor tables recognized on every architecture: {GUID, destination
 * pointer, printable name}. The name may be empty to suppress logging.
 */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
	{ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
	{SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
	{SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
	{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
	{LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
	{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
/*
 * Look up @guid in the NULL_GUID-terminated @table_types array and, on a
 * match, store @table through the entry's destination pointer.
 */
static __init int match_config_table(const efi_guid_t *guid,
				     const efi_config_table_type_t *table_types)
	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (!efi_guidcmp(*guid, table_types[i].guid)) {
			*(table_types[i].ptr) = table;
			/* Only log tables that were given a printable name. */
			if (table_types[i].name[0])
				  table_types[i].name, table);
/*
 * Walk the firmware's configuration-table array, matching each entry
 * against the common table list and then @arch_tables, and act on the
 * Linux-specific tables (RNG seed, MEMRESERVE, RT properties) once the
 * scan completes.
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   const efi_config_table_type_t *arch_tables)
	/*
	 * On x86 the native layout depends on the firmware word size, so the
	 * array is reinterpreted as 64-bit or 32-bit entries explicitly.
	 */
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;

	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			/* A 32-bit kernel cannot address tables above 4GB. */
			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_err("Table located above 4GB, disabling EFI.\n");
			guid = &tbl32[i].guid;
			table = tbl32[i].table;

		/* Arch tables are only consulted if no common table matched. */
		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);

	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;

		/* First map just the header to learn the payload size ... */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
			size = min(seed->size, EFI_RANDOM_SEED_SIZE);
			early_memunmap(seed, sizeof(*seed));
			pr_err("Could not map UEFI random seed!\n");

			/* ... then remap header + payload and feed the pool. */
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
				pr_err("Could not map UEFI random seed!\n");

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* Walk the linked list of memreserve pages. */
		struct linux_efi_memreserve *rsv;

		/*
		 * Just map a full page: that is what we will get
		 * anyway, and it permits us to map the entire entry
		 * before knowing its size.
		 */
		p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
			pr_err("Could not map UEFI memreserve entry!\n");

		rsv = (void *)(p + prsv % PAGE_SIZE);

		/* reserve the entry itself */
		memblock_reserve(prsv,
				 struct_size(rsv, entry, rsv->size));

		for (i = 0; i < atomic_read(&rsv->count); i++) {
			memblock_reserve(rsv->entry[i].base,

		early_memunmap(p, PAGE_SIZE);

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		/* The RT properties table can further restrict runtime services. */
		tbl = early_memremap(rt_prop, sizeof(*tbl));
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
/*
 * Sanity-check an EFI system table header: the signature must match, and a
 * major revision below @min_major_version only triggers a warning.
 */
int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");

	/* Revision is encoded as major in the high 16 bits, minor in the low. */
	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
/*
 * Map the firmware vendor string for early access. The #else branch below
 * provides trivial __va()-based equivalents for configurations where the
 * table is already directly addressable.
 */
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
		pr_err("Could not map the firmware vendor!\n");

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
	early_memunmap((void *)fw_vendor, size);

#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
/*
 * Log the EFI revision and firmware vendor. Also applies the Apple Mac
 * quirk that pins the runtime version to EFI 1.10.
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
	char vendor[100] = "unknown";
	const efi_char16_t *c16;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));

	/* Naive UCS-2 -> ASCII conversion: copy the low byte of each char. */
	for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)

	unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,

	/* Apple x86-64 firmware misbehaves with newer runtime interfaces. */
	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
/* Printable names for the EFI memory-map descriptor types, indexed by type. */
static __initdata char memory_type_name[][13] = {

/*
 * Format an EFI memory descriptor's type and attribute bits into @buf as
 * "[type|RUN|...|UC]". Unknown types fall back to a numeric form, and
 * unknown attribute bits cause the raw attribute value to be printed.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)

	attr = md->attribute;
	/* Any bit outside the known set forces the raw hex fallback. */
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO		? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */

/**
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
/**
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
/* Translate a firmware efi_status_t into a negative Linux errno. */
int efi_status_to_err(efi_status_t status)
	case EFI_INVALID_PARAMETER:
	case EFI_OUT_OF_RESOURCES:
	case EFI_DEVICE_ERROR:
	case EFI_WRITE_PROTECTED:
	case EFI_SECURITY_VIOLATION:
/* Protects insertions at the head of the memreserve linked list. */
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
/* Cached mapping of the first memreserve list entry; set once at init. */
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

/*
 * Map the head of the LINUX_EFI_MEMRESERVE list, if the firmware/stub
 * provided one.
 */
static int __init efi_memreserve_map_root(void)
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
	if (WARN_ON_ONCE(!efi_memreserve_root))
/*
 * Insert a "reserved" resource for [addr, addr + size) into the iomem
 * tree, nesting it under the conflicting parent (typically 'System RAM').
 */
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
	struct resource *res, *parent;

	/* GFP_ATOMIC: this can be called from non-sleepable contexts. */
	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);
/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE list so it survives
 * kexec: try to claim a free slot in an existing list entry, otherwise
 * allocate a fresh page-sized entry and link it at the head.
 */
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
	struct linux_efi_memreserve *rsv;

	/* ULONG_MAX sentinel: mapping the root failed earlier; give up. */
	if (efi_memreserve_root == (void *)ULONG_MAX)

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		/* Atomically claim an index unless the entry is already full. */
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			return efi_mem_reserve_iomem(addr, size);

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
		free_page((unsigned long)rsv);

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	/* Splice the new entry at the head of the list under the lock. */
	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
/*
 * Early init: map the memreserve list root; on failure, poison the pointer
 * with ULONG_MAX so later callers fail fast instead of retrying.
 */
static int __init efi_memreserve_root_init(void)
	if (efi_memreserve_root)
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
early_initcall(efi_memreserve_root_init);
/*
 * Reboot-notifier callback: on kexec, refresh the firmware-provided random
 * seed table with fresh entropy so the next kernel does not reuse it.
 */
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
	struct linux_efi_random_seed *seed;

	/* Only act when this reboot is actually a kexec. */
	if (!kexec_in_progress)

	/* Map the header first to read the seed size ... */
	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		pr_err("Could not map UEFI random seed!\n");

	/* ... then remap header + payload and overwrite it. */
	seed = memremap(efi_rng_seed, sizeof(*seed) + size,
		get_random_bytes(seed->bits, seed->size);
		pr_err("Could not map UEFI random seed!\n");

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,

/* Register the kexec seed-refresh hook only if a seed table exists. */
static int __init register_update_efi_random_seed(void)
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
	return register_reboot_notifier(&efi_random_seed_nb);
late_initcall(register_update_efi_random_seed);