From e8c10ef9dde3ab7b7d7db6804859d9daf38f01c4 Mon Sep 17 00:00:00 2001
From: "travis@sgi.com"
Date: Wed, 30 Jan 2008 13:33:12 +0100
Subject: [PATCH] x86: change bios_cpu_apicid to percpu data variable

Change the static bios_cpu_apicid array to a per_cpu data variable.
This includes using a static array during initialization, similar to
the way x86_cpu_to_apicid[] is handled.

There is one early use of bios_cpu_apicid in apic_is_clustered_box().
The other reference, in cpu_present_to_apicid(), is called after
smp_set_apicids() has set up the percpu version of bios_cpu_apicid.

[ mingo@elte.hu: build fix ]

Signed-off-by: Mike Travis
Reviewed-by: Christoph Lameter
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 arch/x86/kernel/apic_64.c    | 16 ++++++++++++++--
 arch/x86/kernel/mpparse_64.c | 17 ++++++++++++-----
 arch/x86/kernel/setup_64.c   |  3 +++
 arch/x86/kernel/smpboot_64.c |  7 +++++++
 include/asm-x86/smp_64.h     |  8 +++++---
 5 files changed, 41 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 01d4ca2..f9919c4 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -1180,14 +1180,26 @@ __cpuinit int apic_is_clustered_box(void)
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
 	for (i = 0; i < NR_CPUS; i++) {
-		id = bios_cpu_apicid[i];
+		/* are we being called early in kernel startup? */
+		if (x86_bios_cpu_apicid_early_ptr) {
+			id = ((u16 *)x86_bios_cpu_apicid_early_ptr)[i];
+		}
+		else if (i < nr_cpu_ids) {
+			if (cpu_present(i))
+				id = per_cpu(x86_bios_cpu_apicid, i);
+			else
+				continue;
+		}
+		else
+			break;
+
 		if (id != BAD_APICID)
 			__set_bit(APIC_CLUSTERID(id), clustermap);
 	}
 
 	/* Problem: Partially populated chassis may not have CPUs in some of
 	 * the APIC clusters they have been allocated. Only present CPUs have
-	 * bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
+	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
 	 * clusters are allocated sequentially, count zeros only if they are
 	 * bounded by ones.
 	 */
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 528ad96..fd67175 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -67,7 +67,11 @@ unsigned disabled_cpus __cpuinitdata;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
 
-u16 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
+	= { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_bios_cpu_apicid_early_ptr;
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 
 /*
@@ -118,19 +122,22 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 	physid_set(m->mpc_apicid, phys_cpu_present_map);
 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
 		/*
-		 * bios_cpu_apicid is required to have processors listed
+		 * x86_bios_cpu_apicid is required to have processors listed
 		 * in same order as logical cpu numbers. Hence the first
 		 * entry is BSP, and so on.
 		 */
 		cpu = 0;
 	}
-	bios_cpu_apicid[cpu] = m->mpc_apicid;
 	/* are we being called early in kernel startup? */
 	if (x86_cpu_to_apicid_early_ptr) {
-		u16 *x86_cpu_to_apicid = (u16 *)x86_cpu_to_apicid_early_ptr;
-		x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+		u16 *cpu_to_apicid = (u16 *)x86_cpu_to_apicid_early_ptr;
+		u16 *bios_cpu_apicid = (u16 *)x86_bios_cpu_apicid_early_ptr;
+
+		cpu_to_apicid[cpu] = m->mpc_apicid;
+		bios_cpu_apicid[cpu] = m->mpc_apicid;
 	} else {
 		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
 	}
 
 	cpu_set(cpu, cpu_possible_map);
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 529e45c3..71a420c 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -362,8 +362,11 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_SMP
 	/* setup to use the early static init tables during kernel startup */
 	x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
+#ifdef CONFIG_NUMA
 	x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
 #endif
+	x86_bios_cpu_apicid_early_ptr = (void *)&x86_bios_cpu_apicid_init;
+#endif
 
 #ifdef CONFIG_ACPI
 	/*
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index a8bc2bc..93071cd 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -864,8 +864,12 @@ void __init smp_set_apicids(void)
 		if (per_cpu_offset(cpu)) {
 			per_cpu(x86_cpu_to_apicid, cpu) =
 						x86_cpu_to_apicid_init[cpu];
+#ifdef CONFIG_NUMA
 			per_cpu(x86_cpu_to_node_map, cpu) =
 						x86_cpu_to_node_map_init[cpu];
+#endif
+			per_cpu(x86_bios_cpu_apicid, cpu) =
+						x86_bios_cpu_apicid_init[cpu];
 		}
 		else
 			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
@@ -874,7 +878,10 @@ void __init smp_set_apicids(void)
 
 	/* indicate the early static arrays are gone */
 	x86_cpu_to_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
 	x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+	x86_bios_cpu_apicid_early_ptr = NULL;
 }
 
 static void __init smp_cpu_index_default(void)
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 6fa332d..e0a7551 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -27,18 +27,20 @@ extern int smp_call_function_mask(cpumask_t mask,
 				  void (*func)(void *), void *info, int wait);
 
 extern u16 __initdata x86_cpu_to_apicid_init[];
+extern u16 __initdata x86_bios_cpu_apicid_init[];
 extern void *x86_cpu_to_apicid_early_ptr;
-extern u16 bios_cpu_apicid[];
+extern void *x86_bios_cpu_apicid_early_ptr;
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
-		return (int)bios_cpu_apicid[mps_cpu];
+	if (cpu_present(mps_cpu))
+		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
 }
-- 
2.7.4
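
For reference, below is a minimal, standalone userspace sketch of the early-table /
per-CPU pattern the patch implements: an init-time table is read through an "early
pointer" until per-CPU storage has been populated, after which the pointer is cleared.
The names, the plain arrays standing in for real per-CPU storage, and the main()
harness are illustrative only; they are not the kernel's APIs. In the patch itself the
corresponding pieces are x86_bios_cpu_apicid_init[], x86_bios_cpu_apicid_early_ptr,
per_cpu(x86_bios_cpu_apicid, cpu), and smp_set_apicids(), which does the copy and
NULLs the early pointer.

/* Illustrative sketch only -- not kernel code; all names are made up. */
#include <stdio.h>

#define NR_CPUS		4
#define BAD_APICID	0xFFFFu

/* stand-in for the __initdata table filled in while parsing the MP table */
static unsigned short bios_apicid_init[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = BAD_APICID
};

/* early pointer: non-NULL only while the init table is still valid */
static void *bios_apicid_early_ptr = bios_apicid_init;

/* stand-in for DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID */
static unsigned short percpu_bios_apicid[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = BAD_APICID
};

static unsigned short bios_apicid(int cpu)
{
	/* are we being called early in startup? */
	if (bios_apicid_early_ptr)
		return ((unsigned short *)bios_apicid_early_ptr)[cpu];
	return percpu_bios_apicid[cpu];
}

/* stand-in for the copy done in smp_set_apicids() */
static void set_apicids(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		percpu_bios_apicid[cpu] = bios_apicid_init[cpu];

	/* indicate the early static array is gone */
	bios_apicid_early_ptr = NULL;
}

int main(void)
{
	bios_apicid_init[0] = 0;	/* BSP */
	bios_apicid_init[1] = 2;

	printf("early:  cpu1 -> %#x\n", bios_apicid(1));
	set_apicids();
	printf("percpu: cpu1 -> %#x\n", bios_apicid(1));
	return 0;
}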