Return an actual bool for kvm_x86_ops' {interrupt,nmi,smi}_allowed() hooks to
better reflect the return semantics, and to avoid creating an even
bigger mess when the related VMX code is refactored in upcoming patches.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200423022550.15113-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
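
[Background, not part of the patch: these hooks are used as simple yes/no
predicates, and the conversion makes that contract explicit in the type. A
minimal standalone sketch of the difference, with made-up names and a made-up
flag bit rather than anything from KVM:]

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a vCPU; not actual KVM code. */
	struct vcpu { unsigned long hflags; };

	#define NMI_MASKED_FLAG (1UL << 3)	/* illustrative bit, not KVM's HF_NMI_MASK */

	/* Old style: any non-zero int means "allowed", but the type doesn't say so. */
	static int nmi_allowed_old(struct vcpu *v)
	{
		return !(v->hflags & NMI_MASKED_FLAG);
	}

	/* New style: the predicate contract is explicit in the signature. */
	static bool nmi_allowed_new(struct vcpu *v)
	{
		return !(v->hflags & NMI_MASKED_FLAG);
	}

	int main(void)
	{
		struct vcpu v = { .hflags = 0 };

		/* Call sites read identically; only the declared contract changes. */
		if (nmi_allowed_old(&v))
			printf("old hook: NMI allowed\n");
		if (nmi_allowed_new(&v))
			printf("new hook: NMI allowed\n");
		return 0;
	}
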
void (*set_nmi)(struct kvm_vcpu *vcpu);
void (*queue_exception)(struct kvm_vcpu *vcpu);
void (*cancel_injection)(struct kvm_vcpu *vcpu);
- int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
- int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+ bool (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+ bool (*nmi_allowed)(struct kvm_vcpu *vcpu);
bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
- int (*smi_allowed)(struct kvm_vcpu *vcpu);
+ bool (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
int (*enable_smi_window)(struct kvm_vcpu *vcpu);
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
-static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
- int ret;
+ bool ret;
ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}
}
-static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
if (!gif_set(svm) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
- return 0;
+ return false;
if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
vcpu->arch.mcg_cap &= 0x1ff;
}
-static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
/* Per APM Vol.2 15.22.2 "Response to SMI" */
if (!gif_set(svm))
- return 0;
+ return false;
if (is_guest_mode(&svm->vcpu) &&
svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
/* TODO: Might need to set exit_info_1 and exit_info_2 here */
svm->vmcb->control.exit_code = SVM_EXIT_SMI;
svm->nested.exit_required = true;
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
}
}
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
+ return false;
if (!enable_vnmi &&
to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
- return 0;
+ return false;
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_NMI));
}
-static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
if (to_vmx(vcpu)->nested.nested_run_pending)
return false;
~FEAT_CTL_LMCE_ENABLED;
}
-static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_smi_allowed(struct kvm_vcpu *vcpu)
{
/* we need a nested vmexit to enter SMM, postpone if run is pending */
if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
- return 1;
+ return false;
+ return true;
}
static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
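
[Side note on the !!(...) pattern kept in svm_interrupt_allowed above: with a
bool return type the double negation is no longer strictly required, since C's
conversion to _Bool already normalizes any non-zero value to 1. A standalone
sketch with illustrative values, not KVM's:]

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long hflags = 1UL << 1;	/* illustrative flag word */
		unsigned long mask   = 1UL << 1;	/* illustrative flag bit */

		/* !! collapses any non-zero value to exactly 1 ... */
		printf("raw: %lu  normalized: %d\n",
		       hflags & mask, !!(hflags & mask));

		/* ... and conversion to bool performs the same normalization. */
		bool b = hflags & mask;
		printf("as bool: %d\n", b);
		return 0;
	}
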