x86: minor adjustments for do_boot_cpu
authorGlauber de Oliveira Costa <gcosta@redhat.com>
Wed, 19 Mar 2008 17:25:51 +0000 (14:25 -0300)
committerIngo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:02 +0000 (17:41 +0200)
This patch provides minor adjustments for do_boot_cpu
in both architectures to allow for their later integration.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/smpboot_32.c
arch/x86/kernel/smpboot_64.c

index bd2f886..5165b11 100644 (file)
@@ -556,7 +556,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
  * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
  */
 {
-       unsigned long boot_error;
+       unsigned long boot_error = 0;
        int timeout;
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;
@@ -566,11 +566,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
        };
        INIT_WORK(&c_idle.work, do_fork_idle);
 
-       /*
-        * Save current MTRR state in case it was changed since early boot
-        * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
-        */
-       mtrr_save_state();
+       alternatives_smp_switch(1);
 
        c_idle.idle = get_idle_for_cpu(cpu);
 
@@ -607,8 +603,6 @@ do_rest:
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();
 
-       alternatives_smp_switch(1);
-
        /* So we see what's up   */
        printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
        /* Stack for startup_32 can be just as for start_secondary onwards */
@@ -628,6 +622,12 @@ do_rest:
        store_NMI_vector(&nmi_high, &nmi_low);
 
        smpboot_setup_warm_reset_vector(start_eip);
+       /*
+        * Be paranoid about clearing APIC errors.
+        */
+       apic_write(APIC_ESR, 0);
+       apic_read(APIC_ESR);
+
 
        /*
         * Starting actual IPI sequence...
@@ -864,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
                return -EINVAL;
        }
 
+       /*
+        * Save current MTRR state in case it was changed since early boot
+        * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
+        */
+       mtrr_save_state();
+
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
        __smp_prepare_cpu(cpu);
index e93fff4..7d1b4cb 100644 (file)
@@ -432,7 +432,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
  */
 static int __cpuinit do_boot_cpu(int cpu, int apicid)
 {
-       unsigned long boot_error;
+       unsigned long boot_error = 0;
        int timeout;
        unsigned long start_rip;
        struct create_idle c_idle = {
@@ -531,11 +531,6 @@ do_rest:
        apic_read(APIC_ESR);
 
        /*
-        * Status is now clean
-        */
-       boot_error = 0;
-
-       /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
@@ -564,7 +559,7 @@ do_rest:
                        print_cpu_info(&cpu_data(cpu));
                } else {
                        boot_error = 1;
-                       if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
+                       if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
@@ -583,10 +578,12 @@ do_rest:
                cpu_clear(cpu, cpu_present_map);
                cpu_clear(cpu, cpu_possible_map);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
-               return -EIO;
        }
 
-       return 0;
+       /* mark "stuck" area as not stuck */
+       *((volatile unsigned long *)trampoline_base) = 0;
+
+       return boot_error;
 }
 
 cycles_t cacheflush_time;