xen/vcpu: Handle xen_vcpu_setup() failure at boot
authorAnkur Arora <ankur.a.arora@oracle.com>
Sat, 3 Jun 2017 00:06:02 +0000 (17:06 -0700)
committerJuergen Gross <jgross@suse.com>
Tue, 13 Jun 2017 14:10:58 +0000 (16:10 +0200)
On PVH, PVHVM, at failure in the VCPUOP_register_vcpu_info hypercall
we limit the number of cpus to MAX_VIRT_CPUS. However, if this
failure had occurred for a cpu beyond MAX_VIRT_CPUS, we continue
to function with > MAX_VIRT_CPUS.

This leads to problems at the next save/restore cycle when there
are > MAX_VIRT_CPUS threads going into stop_machine() but coming
back up there's valid state for only the first MAX_VIRT_CPUS.

This patch pulls the excess CPUs down via cpu_down().

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
arch/x86/xen/smp.c
arch/x86/xen/smp.h
arch/x86/xen/smp_hvm.c
arch/x86/xen/smp_pv.c

index 82ac611..e7f02eb 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
@@ -114,6 +115,36 @@ int xen_smp_intr_init(unsigned int cpu)
        return rc;
 }
 
+/*
+ * smp_cpus_done callback: offline any CPU whose Xen vcpu id is
+ * >= MAX_VIRT_CPUS when vcpu_info placement is not in use, since
+ * only the first MAX_VIRT_CPUS vcpus have valid state after a
+ * save/restore cycle.
+ */
+void __init xen_smp_cpus_done(unsigned int max_cpus)
+{
+       int cpu, rc, count = 0;
+
+       /* On HVM, preserve the native smp_cpus_done work. */
+       if (xen_hvm_domain())
+               native_smp_cpus_done(max_cpus);
+
+       /*
+        * With vcpu_info placement there is no MAX_VIRT_CPUS limit,
+        * so nothing needs to be pulled down.
+        */
+       if (xen_have_vcpu_info_placement)
+               return;
+
+       for_each_online_cpu(cpu) {
+               if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
+                       continue;
+
+               rc = cpu_down(cpu);
+
+               if (rc == 0) {
+                       /*
+                        * Reset vcpu_info so this cpu cannot be onlined again.
+                        */
+                       xen_vcpu_info_reset(cpu);
+                       count++;
+               } else {
+                       pr_warn("%s: failed to bring CPU %d down, error %d\n",
+                               __func__, cpu, rc);
+               }
+       }
+       /* Warn (with a backtrace) if any CPUs were taken offline. */
+       WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
+}
+
 void xen_smp_send_reschedule(int cpu)
 {
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
index 8ebb6ac..87d3c76 100644 (file)
@@ -14,6 +14,8 @@ extern void xen_smp_intr_free(unsigned int cpu);
 int xen_smp_intr_init_pv(unsigned int cpu);
 void xen_smp_intr_free_pv(unsigned int cpu);
 
+void xen_smp_cpus_done(unsigned int max_cpus);
+
 void xen_smp_send_reschedule(int cpu);
 void xen_smp_send_call_function_ipi(const struct cpumask *mask);
 void xen_smp_send_call_function_single_ipi(int cpu);
index 6c8a805..fd60abe 100644 (file)
@@ -71,4 +71,5 @@ void __init xen_hvm_smp_init(void)
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+       smp_ops.smp_cpus_done = xen_smp_cpus_done;
 }
index aae3253..1ea598e 100644 (file)
@@ -371,10 +371,6 @@ static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
        return 0;
 }
 
-static void xen_pv_smp_cpus_done(unsigned int max_cpus)
-{
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int xen_pv_cpu_disable(void)
 {
@@ -469,7 +465,7 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
 static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_pv_smp_prepare_cpus,
-       .smp_cpus_done = xen_pv_smp_cpus_done,
+       .smp_cpus_done = xen_smp_cpus_done,
 
        .cpu_up = xen_pv_cpu_up,
        .cpu_die = xen_pv_cpu_die,