*/
static struct cont_desc {
struct microcode_amd *mc;
+ u32 cpuid_1_eax;
u32 psize;
u16 eq_id;
u8 *data;
struct equiv_cpu_entry *eq;
ssize_t orig_size = size;
u32 *hdr = (u32 *)ucode;
- u32 eax, ebx, ecx, edx;
u16 eq_id;
u8 *buf;
eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
- eax = 1;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
/* Find the equivalence ID of our CPU in this table: */
- eq_id = find_equiv_id(eq, eax);
+ eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
buf += hdr[2] + CONTAINER_HDR_SZ;
size -= hdr[2] + CONTAINER_HDR_SZ;
*
* Returns true if container found (sets @desc), false otherwise.
*/
-static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
- struct cont_desc *ret_desc)
+static bool
+apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
+ bool save_patch, struct cont_desc *ret_desc)
{
struct cont_desc desc = { 0 };
u8 (*patch)[PATCH_MAX_SIZE];
if (check_current_patch_level(&rev, true))
return false;
+ desc.cpuid_1_eax = cpuid_1_eax;
+
scan_containers(ucode, size, &desc);
if (!desc.eq_id)
return ret;
#endif
}
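For readers following the eq_id plumbing above: the lookup which scan_containers()/find_equiv_id() now key off desc->cpuid_1_eax is a linear walk of the container's equivalence table. Below is a rough userspace sketch of that walk, assuming a trimmed-down equiv_cpu_entry (the real entry also carries fixed-errata mask/compare fields) and made-up table values, so it is an illustration only, not the loader's code.

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down view of an equivalence table entry (illustrative). */
struct equiv_cpu_entry {
	uint32_t installed_cpu;	/* CPUID(1).EAX of a supported CPU   */
	uint16_t equiv_cpu;	/* ID the patch headers are keyed on */
};

/* Same idea as the loader's find_equiv_id(): walk the zero-terminated
 * table and return the equivalence ID matching our CPUID(1).EAX, or 0. */
static uint16_t find_equiv_id(const struct equiv_cpu_entry *eq, uint32_t sig)
{
	if (!eq)
		return 0;

	for (; eq->installed_cpu; eq++)
		if (eq->installed_cpu == sig)
			return eq->equiv_cpu;

	return 0;
}

int main(void)
{
	/* Made-up entries purely for illustration. */
	const struct equiv_cpu_entry table[] = {
		{ 0x00600f12, 0x6012 },
		{ 0x00610f01, 0x6101 },
		{ 0 },
	};

	printf("eq_id: %#x\n", (unsigned int)find_equiv_id(table, 0x00600f12));
	return 0;
}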
-void __init load_ucode_amd_bsp(unsigned int family)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
struct ucode_cpu_info *uci;
- u32 eax, ebx, ecx, edx;
struct cpio_data cp;
const char *path;
bool use_pa;
use_pa = false;
}
- if (!get_builtin_microcode(&cp, family))
+ if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
cp = find_microcode_in_initrd(path, use_pa);
if (!(cp.data && cp.size))
return;
- /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
- eax = 1;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- uci->cpu_sig.sig = eax;
+ /* Needed in load_microcode_amd() */
+ uci->cpu_sig.sig = cpuid_1_eax;
- apply_microcode_early_amd(cp.data, cp.size, true, NULL);
+ apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}
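Since x86_family(cpuid_1_eax) now replaces the pre-computed family argument throughout, here is a standalone sketch of what that extraction does with the raw CPUID(1).EAX signature. The helper mirrors the kernel's x86_family(); the sample signature value is only an example.

#include <stdint.h>
#include <stdio.h>

/*
 * CPUID(1).EAX layout: [3:0] stepping, [7:4] model, [11:8] family,
 * [19:16] extended model, [27:20] extended family.  The extended
 * family is only added in when the base family field reads 0xf.
 */
static unsigned int x86_family(uint32_t sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;

	return fam;
}

int main(void)
{
	/* e.g. 0x00600f12: base family 0xf + extended family 0x6 = 0x15 */
	printf("family: %#x\n", x86_family(0x00600f12));
	return 0;
}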
#ifdef CONFIG_X86_32
* In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
* which is used upon resume from suspend.
*/
-void load_ucode_amd_ap(unsigned int family)
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
struct microcode_amd *mc;
struct cpio_data cp;
return;
}
- if (!get_builtin_microcode(&cp, family))
+ if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
if (!(cp.data && cp.size))
* This would set amd_ucode_patch above so that the following APs can
* use it directly instead of going down this path again.
*/
- apply_microcode_early_amd(cp.data, cp.size, true, NULL);
+ apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}
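The comment above describes the caching step: the first CPU through this path stashes the matched patch in amd_ucode_patch so later APs (and resume from suspend) skip the container scan. The following is a schematic, purely illustrative userspace sketch of that pattern; the buffer size and function names are hypothetical, not the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PATCH 4096			/* hypothetical size, not PATCH_MAX_SIZE */

static uint8_t bsp_patch_cache[MAX_PATCH];
static size_t  bsp_patch_size;

/* Stand-in for the actual microcode update; hypothetical helper. */
static void apply_on_this_cpu(const uint8_t *patch, size_t size)
{
	printf("applying %zu-byte patch at %p\n", size, (const void *)patch);
}

/* Boot CPU: apply the patch and stash it for everyone else. */
static void bsp_load(const uint8_t *patch, size_t size)
{
	apply_on_this_cpu(patch, size);

	if (size <= MAX_PATCH) {
		memcpy(bsp_patch_cache, patch, size);
		bsp_patch_size = size;
	}
}

/* Later CPUs: reuse the cached copy, no container re-scan. */
static void ap_load(void)
{
	if (bsp_patch_size)
		apply_on_this_cpu(bsp_patch_cache, bsp_patch_size);
}

int main(void)
{
	const uint8_t fake_patch[16] = { 0 };

	bsp_load(fake_patch, sizeof(fake_patch));
	ap_load();
	return 0;
}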
#else
-void load_ucode_amd_ap(unsigned int family)
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
struct equiv_cpu_entry *eq;
struct microcode_amd *mc;
- u32 rev, eax;
+ u32 rev;
u16 eq_id;
/* 64-bit runs with paging enabled, thus early==false. */
return;
reget:
- if (!get_builtin_microcode(&cp, family)) {
+ if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
#ifdef CONFIG_BLK_DEV_INITRD
cp = find_cpio_data(ucode_path, (void *)initrd_start,
initrd_end - initrd_start, NULL);
}
}
- if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) {
+ if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
cont.data = NULL;
cont.size = -1;
return;
}
}
- eax = cpuid_eax(0x00000001);
eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
- eq_id = find_equiv_id(eq, eax);
+ eq_id = find_equiv_id(eq, cpuid_1_eax);
if (!eq_id)
return;
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
-int __init save_microcode_in_initrd_amd(unsigned int fam)
+int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
enum ucode_state ret;
int retval = 0;
return -EINVAL;
}
+ cont.cpuid_1_eax = cpuid_1_eax;
+
scan_containers(cp.data, cp.size, &cont);
if (!cont.eq_id) {
cont.size = -1;
return -EINVAL;
}
- ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size);
+ ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
+          cont.data, cont.size);
if (ret != UCODE_OK)
retval = -EINVAL;
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
-/*
- * Operations that are run on a target cpu:
- */
-
struct cpu_info_ctx {
struct cpu_signature *cpu_sig;
int err;
static bool __init check_loader_disabled_bsp(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
- u32 a, b, c, d;
#ifdef CONFIG_X86_32
const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
if (!have_cpuid_p())
return *res;
- a = 1;
- c = 0;
- native_cpuid(&a, &b, &c, &d);
-
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
* that's good enough as they don't land on the BSP path anyway.
*/
- if (c & BIT(31))
+ if (native_cpuid_ecx(1) & BIT(31))
return *res;
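As the comment notes, CPUID(1).ECX[31] is the hypervisor-present bit. Here is a userspace analogue of that check, using the compiler's <cpuid.h> intrinsic rather than the kernel's native_cpuid_ecx() helper, purely for illustration.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if leaf 1 is not supported. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* ECX bit 31: set when running under a hypervisor. */
	printf("running under a hypervisor: %s\n",
	       (ecx & (1u << 31)) ? "yes" : "no");
	return 0;
}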
if (cmdline_find_option_bool(cmdline, option) <= 0)
void __init load_ucode_bsp(void)
{
- int vendor;
- unsigned int family;
+ unsigned int vendor, cpuid_1_eax;
if (check_loader_disabled_bsp())
return;
- vendor = x86_cpuid_vendor();
- family = x86_cpuid_family();
+ vendor = x86_cpuid_vendor();
+ cpuid_1_eax = native_cpuid_eax(1);
switch (vendor) {
case X86_VENDOR_INTEL:
- if (family >= 6)
+ if (x86_family(cpuid_1_eax) >= 6)
load_ucode_intel_bsp();
break;
case X86_VENDOR_AMD:
- if (family >= 0x10)
- load_ucode_amd_bsp(family);
+ if (x86_family(cpuid_1_eax) >= 0x10)
+ load_ucode_amd_bsp(cpuid_1_eax);
break;
default:
break;
void load_ucode_ap(void)
{
- int vendor, family;
+ unsigned int vendor, cpuid_1_eax;
if (check_loader_disabled_ap())
return;
- vendor = x86_cpuid_vendor();
- family = x86_cpuid_family();
+ vendor = x86_cpuid_vendor();
+ cpuid_1_eax = native_cpuid_eax(1);
switch (vendor) {
case X86_VENDOR_INTEL:
- if (family >= 6)
+ if (x86_family(cpuid_1_eax) >= 6)
load_ucode_intel_ap();
break;
case X86_VENDOR_AMD:
- if (family >= 0x10)
- load_ucode_amd_ap(family);
+ if (x86_family(cpuid_1_eax) >= 0x10)
+ load_ucode_amd_ap(cpuid_1_eax);
break;
default:
break;
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- return save_microcode_in_initrd_amd(c->x86);
+ return save_microcode_in_initrd_amd(cpuid_eax(1));
break;
default:
break;