x86, apicv: add virtual interrupt delivery support
author	Yang Zhang <yang.z.zhang@Intel.com>
Fri, 25 Jan 2013 02:18:51 +0000 (10:18 +0800)
committer	Gleb Natapov <gleb@redhat.com>
Tue, 29 Jan 2013 08:48:19 +0000 (10:48 +0200)
Virtual interrupt delivery spares KVM from having to inject vAPIC
interrupts manually; that is now fully taken care of by the hardware.
This needs some special awareness in the existing interrupt injection
path:

- For a pending interrupt, instead of injecting it directly, we may need
  to update architecture-specific indicators before resuming to the guest
  (see the sketch below).

- A pending interrupt that is masked by the ISR should also be covered
  by the update above, since the hardware decides when the right time to
  inject it is. The current has_interrupt and get_interrupt only return
  a valid vector from an injection point of view.
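  (Purely as an illustration, not part of the patch: with the hooks
  introduced below, the injection side of the vcpu run loop is expected
  to end up along these lines.)

	/* rough sketch of vcpu_enter_guest() with vid hooks in place */
	if (kvm_cpu_has_injectable_intr(vcpu) &&	/* ExtINT, or LAPIC w/o vid */
	    kvm_x86_ops->interrupt_allowed(vcpu))
		kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);

	if (kvm_x86_ops->hwapic_irr_update)		/* vid: just refresh RVI */
		kvm_x86_ops->hwapic_irr_update(vcpu,
				kvm_lapic_find_highest_irr(vcpu));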

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
14 files changed:
arch/ia64/kvm/lapic.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/vmx.h
arch/x86/kvm/irq.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/ioapic.c
virt/kvm/ioapic.h
virt/kvm/irq_comm.c
virt/kvm/kvm_main.c

arch/ia64/kvm/lapic.h
index c5f92a9..c3e2935 100644
@@ -27,4 +27,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 #define kvm_apic_present(x) (true)
 #define kvm_lapic_enabled(x) (true)
 
+static inline bool kvm_apic_vid_enabled(void)
+{
+       /* IA64 has no APICv support, do nothing here */
+       return false;
+}
+
 #endif
arch/x86/include/asm/kvm_host.h
index d42c283..635a74d 100644
@@ -699,6 +699,10 @@ struct kvm_x86_ops {
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+       int (*vm_has_apicv)(struct kvm *kvm);
+       void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+       void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+       void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
@@ -994,6 +998,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
arch/x86/include/asm/vmx.h
index 0a54df0..694586c 100644
@@ -62,6 +62,7 @@
 #define EXIT_REASON_MCE_DURING_VMENTRY  41
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
 #define EXIT_REASON_APIC_ACCESS         44
+#define EXIT_REASON_EOI_INDUCED         45
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
 #define EXIT_REASON_WBINVD              54
 #define SECONDARY_EXEC_WBINVD_EXITING          0x00000040
 #define SECONDARY_EXEC_UNRESTRICTED_GUEST      0x00000080
 #define SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100
+#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200
 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING      0x00000400
 #define SECONDARY_EXEC_ENABLE_INVPCID          0x00001000
 
@@ -181,6 +183,7 @@ enum vmcs_field {
        GUEST_GS_SELECTOR               = 0x0000080a,
        GUEST_LDTR_SELECTOR             = 0x0000080c,
        GUEST_TR_SELECTOR               = 0x0000080e,
+       GUEST_INTR_STATUS               = 0x00000810,
        HOST_ES_SELECTOR                = 0x00000c00,
        HOST_CS_SELECTOR                = 0x00000c02,
        HOST_SS_SELECTOR                = 0x00000c04,
@@ -208,6 +211,14 @@ enum vmcs_field {
        APIC_ACCESS_ADDR_HIGH           = 0x00002015,
        EPT_POINTER                     = 0x0000201a,
        EPT_POINTER_HIGH                = 0x0000201b,
+       EOI_EXIT_BITMAP0                = 0x0000201c,
+       EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
+       EOI_EXIT_BITMAP1                = 0x0000201e,
+       EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
+       EOI_EXIT_BITMAP2                = 0x00002020,
+       EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
+       EOI_EXIT_BITMAP3                = 0x00002022,
+       EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
        GUEST_PHYSICAL_ADDRESS          = 0x00002400,
        GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
        VMCS_LINK_POINTER               = 0x00002800,
arch/x86/kvm/irq.c
index b111aee..484bc87 100644
@@ -38,6 +38,38 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
 
 /*
+ * check if there is a pending interrupt from a
+ * non-APIC source, without intack.
+ */
+static int kvm_cpu_has_extint(struct kvm_vcpu *v)
+{
+       if (kvm_apic_accept_pic_intr(v))
+               return pic_irqchip(v->kvm)->output;     /* PIC */
+       else
+               return 0;
+}
+
+/*
+ * check if there is an injectable interrupt:
+ * when virtual interrupt delivery is enabled,
+ * interrupts from the apic are handled by hardware,
+ * so we don't need to check them here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
+       if (!irqchip_in_kernel(v->kvm))
+               return v->arch.interrupt.pending;
+
+       if (kvm_cpu_has_extint(v))
+               return 1;
+
+       if (kvm_apic_vid_enabled(v->kvm))
+               return 0;
+
+       return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
+}
+
+/*
  * check if there is pending interrupt without
  * intack.
  */
@@ -46,27 +78,41 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
        if (!irqchip_in_kernel(v->kvm))
                return v->arch.interrupt.pending;
 
-       if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
-               return pic_irqchip(v->kvm)->output;     /* PIC */
+       if (kvm_cpu_has_extint(v))
+               return 1;
 
        return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
 EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 
 /*
+ * Read the pending interrupt (from a non-APIC source)
+ * vector and intack.
+ */
+static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+{
+       if (kvm_cpu_has_extint(v))
+               return kvm_pic_read_irq(v->kvm); /* PIC */
+       return -1;
+}
+
+/*
  * Read pending interrupt vector and intack.
  */
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 {
+       int vector;
+
        if (!irqchip_in_kernel(v->kvm))
                return v->arch.interrupt.nr;
 
-       if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
-               return kvm_pic_read_irq(v->kvm);        /* PIC */
+       vector = kvm_cpu_get_extint(v);
+
+       if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
+               return vector;                  /* PIC */
 
        return kvm_get_apic_interrupt(v);       /* APIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
arch/x86/kvm/lapic.c
index f69fc50..02b51dd 100644
@@ -145,21 +145,51 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
        return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
 
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
+void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+                               struct kvm_lapic_irq *irq,
+                               u64 *eoi_exit_bitmap)
 {
-       u16 cid;
-       ldr >>= 32 - map->ldr_bits;
-       cid = (ldr >> map->cid_shift) & map->cid_mask;
+       struct kvm_lapic **dst;
+       struct kvm_apic_map *map;
+       unsigned long bitmap = 1;
+       int i;
 
-       BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
+       rcu_read_lock();
+       map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
-       return cid;
-}
+       if (unlikely(!map)) {
+               __set_bit(irq->vector, (unsigned long *)eoi_exit_bitmap);
+               goto out;
+       }
 
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
-       ldr >>= (32 - map->ldr_bits);
-       return ldr & map->lid_mask;
+       if (irq->dest_mode == 0) { /* physical mode */
+               if (irq->delivery_mode == APIC_DM_LOWEST ||
+                               irq->dest_id == 0xff) {
+                       __set_bit(irq->vector,
+                                 (unsigned long *)eoi_exit_bitmap);
+                       goto out;
+               }
+               dst = &map->phys_map[irq->dest_id & 0xff];
+       } else {
+               u32 mda = irq->dest_id << (32 - map->ldr_bits);
+
+               dst = map->logical_map[apic_cluster_id(map, mda)];
+
+               bitmap = apic_logical_id(map, mda);
+       }
+
+       for_each_set_bit(i, &bitmap, 16) {
+               if (!dst[i])
+                       continue;
+               if (dst[i]->vcpu == vcpu) {
+                       __set_bit(irq->vector,
+                                 (unsigned long *)eoi_exit_bitmap);
+                       break;
+               }
+       }
+
+out:
+       rcu_read_unlock();
 }
 
 static void recalculate_apic_map(struct kvm *kvm)
@@ -225,6 +255,8 @@ out:
 
        if (old)
                kfree_rcu(old, rcu);
+
+       kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
@@ -340,6 +372,10 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 {
        int result;
 
+       /*
+        * Note that irr_pending is just a hint. It will always be
+        * true with virtual interrupt delivery enabled.
+        */
        if (!apic->irr_pending)
                return -1;
 
@@ -456,6 +492,8 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 {
        int result;
+
+       /* Note that isr_count is always 1 with vid enabled */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
@@ -735,6 +773,19 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
 }
 
+static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
+{
+       if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
+           kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+               int trigger_mode;
+               if (apic_test_vector(vector, apic->regs + APIC_TMR))
+                       trigger_mode = IOAPIC_LEVEL_TRIG;
+               else
+                       trigger_mode = IOAPIC_EDGE_TRIG;
+               kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+       }
+}
+
 static int apic_set_eoi(struct kvm_lapic *apic)
 {
        int vector = apic_find_highest_isr(apic);
@@ -751,19 +802,26 @@ static int apic_set_eoi(struct kvm_lapic *apic)
        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);
 
-       if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-           kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
-               int trigger_mode;
-               if (apic_test_vector(vector, apic->regs + APIC_TMR))
-                       trigger_mode = IOAPIC_LEVEL_TRIG;
-               else
-                       trigger_mode = IOAPIC_EDGE_TRIG;
-               kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
-       }
+       kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
 }
 
+/*
+ * This interface assumes a trap-like exit, which has already finished
+ * the desired side effects, including the vISR and vPPR update.
+ */
+void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       trace_kvm_eoi(apic, vector);
+
+       kvm_ioapic_send_eoi(apic, vector);
+       kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
+
 static void apic_send_ipi(struct kvm_lapic *apic)
 {
        u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
@@ -1375,8 +1433,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
                apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
                apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
-       apic->irr_pending = false;
-       apic->isr_count = 0;
+       apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
+       apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
        apic->highest_isr_cache = -1;
        update_divide_count(apic);
        atomic_set(&apic->lapic_timer.pending, 0);
@@ -1591,8 +1649,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
-       apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+       apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+                               1 : count_vectors(apic->regs + APIC_ISR);
        apic->highest_isr_cache = -1;
+       kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
arch/x86/kvm/lapic.h
index 22a5397..1676d34 100644
@@ -65,6 +65,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
+void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
@@ -131,4 +132,30 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
        return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
 }
 
+static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
+{
+       return kvm_x86_ops->vm_has_apicv(kvm);
+}
+
+static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
+{
+       u16 cid;
+       ldr >>= 32 - map->ldr_bits;
+       cid = (ldr >> map->cid_shift) & map->cid_mask;
+
+       BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
+
+       return cid;
+}
+
+static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
+{
+       ldr >>= (32 - map->ldr_bits);
+       return ldr & map->lid_mask;
+}
+
+void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+                               struct kvm_lapic_irq *irq,
+                               u64 *eoi_bitmap);
+
 #endif
arch/x86/kvm/svm.c
index 38407e9..e1b1ce2 100644
@@ -3576,6 +3576,21 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
        return;
 }
 
+static int svm_vm_has_apicv(struct kvm *kvm)
+{
+       return 0;
+}
+
+static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+       return;
+}
+
+static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+       return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -4296,6 +4311,9 @@ static struct kvm_x86_ops svm_x86_ops = {
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
        .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
+       .vm_has_apicv = svm_vm_has_apicv,
+       .load_eoi_exitmap = svm_load_eoi_exitmap,
+       .hwapic_isr_update = svm_hwapic_isr_update,
 
        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
arch/x86/kvm/vmx.c
index 3ce8a16..0cf74a6 100644
@@ -84,8 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg = 1;
-module_param(enable_apicv_reg, bool, S_IRUGO);
+static bool __read_mostly enable_apicv_reg_vid = 1;
+module_param(enable_apicv_reg_vid, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
@@ -781,6 +781,12 @@ static inline bool cpu_has_vmx_apic_register_virt(void)
                SECONDARY_EXEC_APIC_REGISTER_VIRT;
 }
 
+static inline bool cpu_has_vmx_virtual_intr_delivery(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
+}
+
 static inline bool cpu_has_vmx_flexpriority(void)
 {
        return cpu_has_vmx_tpr_shadow() &&
@@ -2571,7 +2577,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                        SECONDARY_EXEC_PAUSE_LOOP_EXITING |
                        SECONDARY_EXEC_RDTSCP |
                        SECONDARY_EXEC_ENABLE_INVPCID |
-                       SECONDARY_EXEC_APIC_REGISTER_VIRT;
+                       SECONDARY_EXEC_APIC_REGISTER_VIRT |
+                       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
@@ -2586,7 +2593,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
                _cpu_based_2nd_exec_control &= ~(
                                SECONDARY_EXEC_APIC_REGISTER_VIRT |
-                               SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+                               SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+                               SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 
        if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
                /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
@@ -2785,8 +2793,14 @@ static __init int hardware_setup(void)
        if (!cpu_has_vmx_ple())
                ple_gap = 0;
 
-       if (!cpu_has_vmx_apic_register_virt())
-               enable_apicv_reg = 0;
+       if (!cpu_has_vmx_apic_register_virt() ||
+                               !cpu_has_vmx_virtual_intr_delivery())
+               enable_apicv_reg_vid = 0;
+
+       if (enable_apicv_reg_vid)
+               kvm_x86_ops->update_cr8_intercept = NULL;
+       else
+               kvm_x86_ops->hwapic_irr_update = NULL;
 
        if (nested)
                nested_vmx_setup_ctls_msrs();
@@ -3928,6 +3942,11 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
        return exec_control;
 }
 
+static int vmx_vm_has_apicv(struct kvm *kvm)
+{
+       return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
+}
+
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
        u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
@@ -3945,8 +3964,9 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
                exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
        if (!ple_gap)
                exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-       if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm))
-               exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
+       if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+               exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
+                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
        exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
        return exec_control;
 }
@@ -3992,6 +4012,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                                vmx_secondary_exec_control(vmx));
        }
 
+       if (enable_apicv_reg_vid) {
+               vmcs_write64(EOI_EXIT_BITMAP0, 0);
+               vmcs_write64(EOI_EXIT_BITMAP1, 0);
+               vmcs_write64(EOI_EXIT_BITMAP2, 0);
+               vmcs_write64(EOI_EXIT_BITMAP3, 0);
+
+               vmcs_write16(GUEST_INTR_STATUS, 0);
+       }
+
        if (ple_gap) {
                vmcs_write32(PLE_GAP, ple_gap);
                vmcs_write32(PLE_WINDOW, ple_window);
@@ -4906,6 +4935,16 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
+static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
+{
+       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       int vector = exit_qualification & 0xff;
+
+       /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
+       kvm_apic_set_eoi_accelerated(vcpu, vector);
+       return 1;
+}
+
 static int handle_apic_write(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5851,6 +5890,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
        [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
        [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
+       [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
        [EXIT_REASON_WBINVD]                  = handle_wbinvd,
        [EXIT_REASON_XSETBV]                  = handle_xsetbv,
        [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
@@ -6208,7 +6248,8 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
         * There is not point to enable virtualize x2apic without enable
         * apicv
         */
-       if (!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg)
+       if (!cpu_has_vmx_virtualize_x2apic_mode() ||
+                               !vmx_vm_has_apicv(vcpu->kvm))
                return;
 
        if (!vm_need_tpr_shadow(vcpu->kvm))
@@ -6228,6 +6269,56 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
        vmx_set_msr_bitmap(vcpu);
 }
 
+static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+       u16 status;
+       u8 old;
+
+       if (!vmx_vm_has_apicv(kvm))
+               return;
+
+       if (isr == -1)
+               isr = 0;
+
+       status = vmcs_read16(GUEST_INTR_STATUS);
+       old = status >> 8;
+       if (isr != old) {
+               status &= 0xff;
+               status |= isr << 8;
+               vmcs_write16(GUEST_INTR_STATUS, status);
+       }
+}
+
+static void vmx_set_rvi(int vector)
+{
+       u16 status;
+       u8 old;
+
+       status = vmcs_read16(GUEST_INTR_STATUS);
+       old = (u8)status & 0xff;
+       if ((u8)vector != old) {
+               status &= ~0xff;
+               status |= (u8)vector;
+               vmcs_write16(GUEST_INTR_STATUS, status);
+       }
+}
+
+static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+       if (max_irr == -1)
+               return;
+
+       vmx_set_rvi(max_irr);
+}
+
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+       vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
+       vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
+       vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
+       vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+}
+
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
        u32 exit_intr_info;
@@ -7492,6 +7583,10 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
        .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+       .vm_has_apicv = vmx_vm_has_apicv,
+       .load_eoi_exitmap = vmx_load_eoi_exitmap,
+       .hwapic_irr_update = vmx_hwapic_irr_update,
+       .hwapic_isr_update = vmx_hwapic_isr_update,
 
        .set_tss_addr = vmx_set_tss_addr,
        .get_tdp_level = get_ept_level,
@@ -7594,7 +7689,7 @@ static int __init vmx_init(void)
        memcpy(vmx_msr_bitmap_longmode_x2apic,
                        vmx_msr_bitmap_longmode, PAGE_SIZE);
 
-       if (enable_apicv_reg) {
+       if (enable_apicv_reg_vid) {
                for (msr = 0x800; msr <= 0x8ff; msr++)
                        vmx_disable_intercept_msr_read_x2apic(msr);
 
@@ -7606,6 +7701,10 @@ static int __init vmx_init(void)
                vmx_enable_intercept_msr_read_x2apic(0x839);
                /* TPR */
                vmx_disable_intercept_msr_write_x2apic(0x808);
+               /* EOI */
+               vmx_disable_intercept_msr_write_x2apic(0x80b);
+               /* SELF-IPI */
+               vmx_disable_intercept_msr_write_x2apic(0x83f);
        }
 
        if (enable_ept) {
arch/x86/kvm/x86.c
index b9f5529..cf512e7 100644
@@ -5565,7 +5565,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
                        vcpu->arch.nmi_injected = true;
                        kvm_x86_ops->set_nmi(vcpu);
                }
-       } else if (kvm_cpu_has_interrupt(vcpu)) {
+       } else if (kvm_cpu_has_injectable_intr(vcpu)) {
                if (kvm_x86_ops->interrupt_allowed(vcpu)) {
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
                                            false);
@@ -5633,6 +5633,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 #endif
 }
 
+static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
+{
+       u64 eoi_exit_bitmap[4];
+
+       memset(eoi_exit_bitmap, 0, 32);
+
+       kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
+       kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
        int r;
@@ -5686,6 +5696,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_handle_pmu_event(vcpu);
                if (kvm_check_request(KVM_REQ_PMI, vcpu))
                        kvm_deliver_pmi(vcpu);
+               if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
+                       update_eoi_exitmap(vcpu);
        }
 
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -5694,10 +5706,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                /* enable NMI/IRQ window open exits if needed */
                if (vcpu->arch.nmi_pending)
                        kvm_x86_ops->enable_nmi_window(vcpu);
-               else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+               else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
                        kvm_x86_ops->enable_irq_window(vcpu);
 
                if (kvm_lapic_enabled(vcpu)) {
+                       /*
+                        * Update architecture specific hints for APIC
+                        * virtual interrupt delivery.
+                        */
+                       if (kvm_x86_ops->hwapic_irr_update)
+                               kvm_x86_ops->hwapic_irr_update(vcpu,
+                                       kvm_lapic_find_highest_irr(vcpu));
                        update_cr8_intercept(vcpu);
                        kvm_lapic_sync_to_vapic(vcpu);
                }
include/linux/kvm_host.h
index 4dd7d75..0350e0d 100644
@@ -123,6 +123,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_MASTERCLOCK_UPDATE 19
 #define KVM_REQ_MCLOCK_INPROGRESS 20
 #define KVM_REQ_EPR_EXIT          21
+#define KVM_REQ_EOIBITMAP         22
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID            0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID       1
@@ -538,6 +539,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_update_eoibitmap_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
@@ -691,6 +693,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
virt/kvm/ioapic.c
index f3abbef..ce82b94 100644
@@ -35,6 +35,7 @@
 #include <linux/hrtimer.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/current.h>
@@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
        smp_wmb();
 }
 
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+                                       u64 *eoi_exit_bitmap)
+{
+       struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+       union kvm_ioapic_redirect_entry *e;
+       struct kvm_lapic_irq irqe;
+       int index;
+
+       spin_lock(&ioapic->lock);
+       /* traverse ioapic entries to set the eoi exit bitmap */
+       for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+               e = &ioapic->redirtbl[index];
+               if (!e->fields.mask &&
+                       (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+                        kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+                                index))) {
+                       irqe.dest_id = e->fields.dest_id;
+                       irqe.vector = e->fields.vector;
+                       irqe.dest_mode = e->fields.dest_mode;
+                       irqe.delivery_mode = e->fields.delivery_mode << 8;
+                       kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+               }
+       }
+       spin_unlock(&ioapic->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
+
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+{
+       struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+       if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+               return;
+       kvm_make_update_eoibitmap_request(kvm);
+}
+
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
        unsigned index;
@@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
                    && ioapic->irr & (1 << index))
                        ioapic_service(ioapic, index);
+               kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
                break;
        }
 }
@@ -455,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
        spin_lock(&ioapic->lock);
        memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
        update_handled_vectors(ioapic);
+       kvm_ioapic_make_eoibitmap_request(kvm);
        spin_unlock(&ioapic->lock);
        return 0;
 }
virt/kvm/ioapic.h
index a30abfe..0400a46 100644
@@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+                                       u64 *eoi_exit_bitmap);
+
 
 #endif
virt/kvm/irq_comm.c
index 656fa45..ff6d40e 100644
@@ -22,6 +22,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <trace/events/kvm.h>
 
 #include <asm/msidef.h>
@@ -237,6 +238,28 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
        return ret;
 }
 
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+       struct kvm_irq_ack_notifier *kian;
+       struct hlist_node *n;
+       int gsi;
+
+       rcu_read_lock();
+       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+       if (gsi != -1)
+               hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+                                        link)
+                       if (kian->gsi == gsi) {
+                               rcu_read_unlock();
+                               return true;
+                       }
+
+       rcu_read_unlock();
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
        struct kvm_irq_ack_notifier *kian;
@@ -261,6 +284,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
+       kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -270,6 +294,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();
+       kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
virt/kvm/kvm_main.c
index 3fec2cd..abc23e2 100644
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
        make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+       make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
        struct page *page;