Merge commit 'v2.6.38-rc4' into x86/cpu
author Ingo Molnar <mingo@elte.hu>
Mon, 14 Feb 2011 12:18:51 +0000 (13:18 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 14 Feb 2011 12:18:56 +0000 (13:18 +0100)
Merge reason: pick up the latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
1  2 
arch/x86/include/asm/smp.h
arch/x86/kernel/smpboot.c

@@@ -23,8 -23,6 +23,8 @@@ extern unsigned int num_processors
  
  DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 +/* cpus sharing the last level cache: */
 +DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
  DECLARE_PER_CPU(u16, cpu_llc_id);
  DECLARE_PER_CPU(int, cpu_number);
  
@@@ -38,19 -36,11 +38,16 @@@ static inline struct cpumask *cpu_core_
        return per_cpu(cpu_core_map, cpu);
  }
  
 +static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 +{
 +      return per_cpu(cpu_llc_shared_map, cpu);
 +}
 +
  DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
  DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
  
  /* Static state in head.S used to set up a CPU */
- extern struct {
-       void *sp;
-       unsigned short ss;
- } stack_start;
+ extern unsigned long stack_start; /* Initial stack pointer address */
  
  struct smp_ops {
        void (*smp_prepare_boot_cpu)(void);
@@@ -130,8 -130,6 +130,8 @@@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map)
  DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  
 +DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
 +
  /* Per CPU bogomips and other parameters */
  DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
  EXPORT_PER_CPU_SYMBOL(cpu_info);
@@@ -357,6 -355,23 +357,6 @@@ notrace static void __cpuinit start_sec
        cpu_idle();
  }
  
 -#ifdef CONFIG_CPUMASK_OFFSTACK
 -/* In this case, llc_shared_map is a pointer to a cpumask. */
 -static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
 -                                  const struct cpuinfo_x86 *src)
 -{
 -      struct cpumask *llc = dst->llc_shared_map;
 -      *dst = *src;
 -      dst->llc_shared_map = llc;
 -}
 -#else
 -static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
 -                                  const struct cpuinfo_x86 *src)
 -{
 -      *dst = *src;
 -}
 -#endif /* CONFIG_CPUMASK_OFFSTACK */
 -
  /*
   * The bootstrap kernel entry code has set these up. Save them for
   * a given CPU
@@@ -366,7 -381,7 +366,7 @@@ void __cpuinit smp_store_cpu_info(int i
  {
        struct cpuinfo_x86 *c = &cpu_data(id);
  
 -      copy_cpuinfo_x86(c, &boot_cpu_data);
 +      *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
  
  static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
  {
 -      struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
 -      struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
 -
        cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
        cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
 -      cpumask_set_cpu(cpu1, c2->llc_shared_map);
 -      cpumask_set_cpu(cpu2, c1->llc_shared_map);
 +      cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
 +      cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
  }
  
  
@@@ -407,7 -425,7 +407,7 @@@ void __cpuinit set_cpu_sibling_map(int 
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }
  
 -      cpumask_set_cpu(cpu, c->llc_shared_map);
 +      cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
  
        if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
        for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 -                      cpumask_set_cpu(i, c->llc_shared_map);
 -                      cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
 +                      cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
 +                      cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
@@@ -458,7 -476,7 +458,7 @@@ const struct cpumask *cpu_coregroup_mas
            !(cpu_has(c, X86_FEATURE_AMD_DCM)))
                return cpu_core_mask(cpu);
        else
 -              return c->llc_shared_map;
 +              return cpu_llc_shared_mask(cpu);
  }
  
  static void impress_friends(void)
@@@ -620,7 -638,7 +620,7 @@@ wakeup_secondary_cpu_via_init(int phys_
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-                        (unsigned long)stack_start.sp);
+                        stack_start);
  
        /*
         * Run STARTUP IPI loop.
@@@ -767,7 -785,7 +767,7 @@@ do_rest
  #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
-       stack_start.sp = (void *) c_idle.idle->thread.sp;
+       stack_start  = c_idle.idle->thread.sp;
  
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
@@@ -1071,13 -1089,13 +1071,13 @@@ void __init native_smp_prepare_cpus(uns
  
        preempt_disable();
        smp_cpu_index_default();
 -      memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
 -      cpumask_copy(cpu_callin_mask, cpumask_of(0));
 -      mb();
 +
        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
 +      cpumask_copy(cpu_callin_mask, cpumask_of(0));
 +      mb();
  #ifdef CONFIG_X86_32
        boot_cpu_logical_apicid = logical_smp_processor_id();
  #endif
        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
 -              zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 +              zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);