xen/pvh: Piggyback on PVHVM for event channels (v2)
author Mukesh Rathor <mukesh.rathor@oracle.com>
Wed, 11 Dec 2013 20:36:51 +0000 (15:36 -0500)
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Mon, 6 Jan 2014 15:44:15 +0000 (10:44 -0500)
PVH is a PV guest with a twist: certain things work in it
as they do in HVM, and others as in PV. There is a similar
mode, PVHVM, where we run in HVM mode with PV code enabled,
and this patch builds on that.

The most notable PV interfaces are the XenBus and event channels.

We piggyback on how the event channel mechanism is used in
PVHVM: we keep the normal native IRQ mechanism and install
a vector (the HVM callback) whose handler invokes the event
channel machinery.
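
For reference, that callback setup boils down to asking the hypervisor to
deliver pending event channels through a fixed IDT vector rather than through
the PV upcall entry point. A minimal sketch of the handshake follows (the
helper name and the value encoding are illustrative; the real code is in
xen_callback_vector()):

    /*
     * Sketch: ask Xen to signal pending event channels via an IDT vector.
     * HVM_PARAM_CALLBACK_IRQ carries the delivery type in its top byte;
     * type 2 means "inject the given vector" (illustrative encoding).
     */
    static int register_evtchn_callback_vector(unsigned int vector)
    {
            struct xen_hvm_param xhp = {
                    .domid = DOMID_SELF,
                    .index = HVM_PARAM_CALLBACK_IRQ,
                    .value = (2ULL << 56) | vector,
            };

            /*
             * Once this succeeds, the hypervisor injects 'vector' whenever
             * an event channel is pending; the vector's handler then runs
             * xen_evtchn_do_upcall(), exactly as on PVHVM.
             */
            return HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
    }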

This means that, from a pvops perspective, we can use
native_irq_ops instead of the Xen PV-specific ones. In the
future we could also support pirq_eoi_map, but that is a
feature request that can be shared with PVHVM.
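
Concretely, the PV-specific irq ops exist only to emulate the interrupt flag
with the per-vcpu event-channel mask; with the HVM callback vector the real
EFLAGS.IF already gates delivery, so the native cli/sti-based ops suffice.
A simplified sketch of what PVH gets to drop (the real, callee-save variants
live in arch/x86/xen/irq.c):

    /* Simplified: "cli" for a PV guest masks the event-channel upcall. */
    static void sketch_xen_irq_disable(void)
    {
            this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
    }

    /* Simplified: "sti" unmasks and replays any upcall that arrived while
     * delivery was masked. */
    static void sketch_xen_irq_enable(void)
    {
            struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);

            vcpu->evtchn_upcall_mask = 0;
            barrier();
            if (unlikely(vcpu->evtchn_upcall_pending))
                    xen_force_evtchn_callback();
    }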

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
arch/x86/xen/enlighten.c
arch/x86/xen/irq.c
drivers/xen/events/events_base.c

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2eca618..a4e2f30 100644
@@ -1144,8 +1144,9 @@ void xen_setup_vcpu_info_placement(void)
                xen_vcpu_setup(cpu);
 
        /* xen_vcpu_setup managed to place the vcpu_info within the
-          percpu area for all cpus, so make use of it */
-       if (have_vcpu_info_placement) {
+        * percpu area for all cpus, so make use of it. Note that for
+        * PVH we want to use native IRQ mechanism. */
+       if (have_vcpu_info_placement && !xen_pvh_domain()) {
                pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 0da7f86..76ca326 100644
@@ -5,6 +5,7 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
 #include <xen/interface/vcpu.h>
+#include <xen/features.h>
 #include <xen/events.h>
 
 #include <asm/xen/hypercall.h>
@@ -128,6 +129,8 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
 
 void __init xen_init_irq_ops(void)
 {
-       pv_irq_ops = xen_irq_ops;
+       /* For PVH we use default pv_irq_ops settings. */
+       if (!xen_feature(XENFEAT_hvm_callback_vector))
+               pv_irq_ops = xen_irq_ops;
        x86_init.irqs.intr_init = xen_init_IRQ;
 }
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1d16185..4672e00 100644
@@ -1685,8 +1685,15 @@ void __init xen_init_IRQ(void)
        pirq_needs_eoi = pirq_needs_eoi_flag;
 
 #ifdef CONFIG_X86
-       if (xen_hvm_domain()) {
+       if (xen_pv_domain()) {
+               irq_ctx_init(smp_processor_id());
+               if (xen_initial_domain())
+                       pci_xen_initial_domain();
+       }
+       if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_callback_vector();
+
+       if (xen_hvm_domain()) {
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
@@ -1695,13 +1702,10 @@ void __init xen_init_IRQ(void)
                int rc;
                struct physdev_pirq_eoi_gmfn eoi_gmfn;
 
-               irq_ctx_init(smp_processor_id());
-               if (xen_initial_domain())
-                       pci_xen_initial_domain();
-
                pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
                eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
+               /* TODO: No PVH support for PIRQ EOI */
                if (rc != 0) {
                        free_page((unsigned long) pirq_eoi_map);
                        pirq_eoi_map = NULL;