diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
@@ ... @@ static void nested_svm_intr(struct vcpu_svm *svm)
 	nested_svm_vmexit(svm);
 }
 
-static bool nested_exit_on_intr(struct vcpu_svm *svm)
-{
-	return (svm->nested.intercept & 1ULL);
-}
-
 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
@@ ... @@ static bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 
-	if (!gif_set(svm) ||
-	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
+	if (!gif_set(svm))
 		return true;
 
-	if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
-		return !(svm->vcpu.arch.hflags & HF_HIF_MASK);
-	else
-		return !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
+	if (is_guest_mode(vcpu)) {
+		/* As long as interrupts are being delivered... */
+		if ((svm->vcpu.arch.hflags & HF_VINTR_MASK)
+		    ? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
+		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
+			return true;
+
+		/* ... vmexits aren't blocked by the interrupt shadow */
+		if (nested_exit_on_intr(svm))
+			return false;
+	} else {
+		if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
+			return true;
+	}
+
+	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
 static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu)
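
(Not part of the patch: the trailing context above is the consumer of this logic. A minimal sketch of how svm_interrupt_allowed() plausibly wraps the blocked check; the nested_run_pending test is an assumption from the surrounding series, not something this hunk shows.)

static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Assumed: an in-flight nested VMRUN defers injection. */
	if (svm->nested.nested_run_pending)
		return false;

	/* Architectural blocking rules from the hunk above. */
	return !svm_interrupt_blocked(vcpu);
}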
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
@@ ... @@ static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
 	return (svm->nested.intercept & (1ULL << INTERCEPT_SMI));
 }
 
+static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
+{
+	return (svm->nested.intercept & (1ULL << INTERCEPT_INTR));
+}
+
 static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 {
 	return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
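
(Also not part of the patch: with the helper moved to svm.h, both svm.c and nested.c can use it. A rough sketch of the nested.c consumer, svm_check_nested_events(), assuming the block_nested_events/-EBUSY convention of KVM's event machinery; everything beyond the nested_exit_on_intr() check is an assumption.)

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	/* Assumed: hold off a synthetic vmexit while an event still
	 * needs to be reinjected into the guest. */
	bool block_nested_events = kvm_event_needs_reinjection(vcpu);

	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
		if (block_nested_events)
			return -EBUSY;
		/* Emulate the INTR intercept: synthesize a vmexit to L1. */
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}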