struct x86_exception *exception);
void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
- int (*check_nested_events)(struct kvm_vcpu *vcpu);
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
+ const struct kvm_x86_nested_ops *nested_ops;
/*
* Architecture specific hooks for vCPU blocking due to
void (*setup_mce)(struct kvm_vcpu *vcpu);
- int (*get_nested_state)(struct kvm_vcpu *vcpu,
- struct kvm_nested_state __user *user_kvm_nested_state,
- unsigned user_data_size);
- int (*set_nested_state)(struct kvm_vcpu *vcpu,
- struct kvm_nested_state __user *user_kvm_nested_state,
- struct kvm_nested_state *kvm_state);
- bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
-
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
- int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
- uint16_t *vmcs_version);
- uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
-
bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};
+struct kvm_x86_nested_ops {
+ int (*check_events)(struct kvm_vcpu *vcpu);
+ int (*get_state)(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ unsigned user_data_size);
+ int (*set_state)(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ struct kvm_nested_state *kvm_state);
+ bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+
+ int (*enable_evmcs)(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version);
+ uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
+};
+
struct kvm_x86_init_ops {
int (*cpu_has_kvm_support)(void);
int (*disabled_by_bios)(void);
};
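The rest of the patch follows the pattern this new struct establishes: each vendor module exports one kvm_x86_nested_ops table, common x86 code reaches it through kvm_x86_ops.nested_ops, and optional callbacks are NULL-checked on the sub-struct rather than on kvm_x86_ops itself. A minimal sketch of that calling convention, for illustration only (not part of the patch; identifiers prefixed example_ are hypothetical):

	/* Vendor side: a partially populated table; unimplemented ops stay NULL. */
	struct kvm_x86_nested_ops example_nested_ops = {
		.check_events = example_check_nested_events,
		/* .get_state, .set_state, .enable_evmcs, ... left NULL */
	};

	/*
	 * Common side: NULL-check the optional op, then call through the pointer.
	 * Passing (NULL, NULL, 0) to get_state reports the required buffer size,
	 * which is how the KVM_CAP_NESTED_STATE hunk below uses it.
	 */
	static int example_nested_state_size(void)
	{
		if (!kvm_x86_ops.nested_ops->get_state)
			return 0;
		return kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0);
	}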
int i, nent = ARRAY_SIZE(cpuid_entries);
- if (kvm_x86_ops.nested_get_evmcs_version)
- evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
+ if (kvm_x86_ops.nested_ops->get_evmcs_version)
+ evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
/* Skip NESTED_FEATURES if eVMCS is not supported */
if (!evmcs_ver)
return (svm->nested.intercept & 1ULL);
}
-int svm_check_nested_events(struct kvm_vcpu *vcpu)
+static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
bool block_nested_events =
return NESTED_EXIT_CONTINUE;
}
+
+struct kvm_x86_nested_ops svm_nested_ops = {
+ .check_events = svm_check_nested_events,
+};
/*
 * TODO: The last condition latches INIT signals on the vCPU when the
 * vCPU is in guest mode and vmcb12 defines an intercept on INIT.
- * To properly emulate the INIT intercept, SVM should implement
- * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit()
- * there if an INIT signal is pending.
+ * To properly emulate the INIT intercept,
+ * svm_check_nested_events() should call nested_svm_vmexit()
+ * if an INIT signal is pending.
*/
return !gif_set(svm) ||
(svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
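This patch does not resolve the TODO above; it only updates the comment to point at svm_check_nested_events() instead of the removed kvm_x86_ops.check_nested_events(), and the INIT intercept emulation itself remains a TODO. Purely to illustrate what the comment describes, a rough sketch of the missing check inside svm_check_nested_events(), assuming a latched INIT is visible through the in-kernel LAPIC's pending_events and that L1's INIT intercept is cached in svm->nested.intercept (sketch only, not the actual implementation):

	/*
	 * Hypothetical addition to svm_check_nested_events(), next to the
	 * existing exception/interrupt checks: reflect a pending INIT to L1
	 * by synthesizing an SVM_EXIT_INIT vmexit.
	 */
	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events) &&
	    (svm->nested.intercept & (1ULL << INTERCEPT_INIT))) {
		if (block_nested_events)
			return -EBUSY;
		svm->vmcb->control.exit_code = SVM_EXIT_INIT;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;
		nested_svm_vmexit(svm);
		return 0;
	}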
.sched_in = svm_sched_in,
.pmu_ops = &amd_pmu_ops,
+ .nested_ops = &svm_nested_ops,
+
.deliver_posted_interrupt = svm_deliver_avic_intr,
.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
.update_pi_irte = svm_update_pi_irte,
.mem_enc_reg_region = svm_register_enc_region,
.mem_enc_unreg_region = svm_unregister_enc_region,
- .nested_enable_evmcs = NULL,
- .nested_get_evmcs_version = NULL,
-
.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
-
- .check_nested_events = svm_check_nested_events,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
-int svm_check_nested_events(struct kvm_vcpu *vcpu);
int nested_svm_exit_special(struct vcpu_svm *svm);
+extern struct kvm_x86_nested_ops svm_nested_ops;
+
/* avic.c */
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
- ops->check_nested_events = vmx_check_nested_events;
- ops->get_nested_state = vmx_get_nested_state;
- ops->set_nested_state = vmx_set_nested_state;
- ops->get_vmcs12_pages = nested_get_vmcs12_pages;
- ops->nested_enable_evmcs = nested_enable_evmcs;
- ops->nested_get_evmcs_version = nested_get_evmcs_version;
-
return 0;
}
+
+struct kvm_x86_nested_ops vmx_nested_ops = {
+ .check_events = vmx_check_nested_events,
+ .get_state = vmx_get_nested_state,
+ .set_state = vmx_set_nested_state,
+ .get_vmcs12_pages = nested_get_vmcs12_pages,
+ .enable_evmcs = nested_enable_evmcs,
+ .get_evmcs_version = nested_get_evmcs_version,
+};
#define nested_guest_cr4_valid nested_cr4_valid
#define nested_host_cr4_valid nested_cr4_valid
+extern struct kvm_x86_nested_ops vmx_nested_ops;
+
#endif /* __KVM_X86_VMX_NESTED_H */
.post_block = vmx_post_block,
.pmu_ops = &intel_pmu_ops,
+ .nested_ops = &vmx_nested_ops,
.update_pi_irte = vmx_update_pi_irte,
.pre_leave_smm = vmx_pre_leave_smm,
.enable_smi_window = enable_smi_window,
- .check_nested_events = NULL,
- .get_nested_state = NULL,
- .set_nested_state = NULL,
- .get_vmcs12_pages = NULL,
- .nested_enable_evmcs = NULL,
- .nested_get_evmcs_version = NULL,
.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
};
r = KVM_X2APIC_API_VALID_FLAGS;
break;
case KVM_CAP_NESTED_STATE:
- r = kvm_x86_ops.get_nested_state ?
- kvm_x86_ops.get_nested_state(NULL, NULL, 0) : 0;
+ r = kvm_x86_ops.nested_ops->get_state ?
+ kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
break;
case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
r = kvm_x86_ops.enable_direct_tlbflush != NULL;
break;
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
- r = kvm_x86_ops.nested_enable_evmcs != NULL;
+ r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
break;
default:
break;
return kvm_hv_activate_synic(vcpu, cap->cap ==
KVM_CAP_HYPERV_SYNIC2);
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
- if (!kvm_x86_ops.nested_enable_evmcs)
+ if (!kvm_x86_ops.nested_ops->enable_evmcs)
return -ENOTTY;
- r = kvm_x86_ops.nested_enable_evmcs(vcpu, &vmcs_version);
+ r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
if (!r) {
user_ptr = (void __user *)(uintptr_t)cap->args[0];
if (copy_to_user(user_ptr, &vmcs_version,
u32 user_data_size;
r = -EINVAL;
- if (!kvm_x86_ops.get_nested_state)
+ if (!kvm_x86_ops.nested_ops->get_state)
break;
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
if (get_user(user_data_size, &user_kvm_nested_state->size))
break;
- r = kvm_x86_ops.get_nested_state(vcpu, user_kvm_nested_state,
- user_data_size);
+ r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
+ user_data_size);
if (r < 0)
break;
int idx;
r = -EINVAL;
- if (!kvm_x86_ops.set_nested_state)
+ if (!kvm_x86_ops.nested_ops->set_state)
break;
r = -EFAULT;
break;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvm_x86_ops.set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
+ r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
* from L2 to L1.
*/
if (is_guest_mode(vcpu)) {
- r = kvm_x86_ops.check_nested_events(vcpu);
+ r = kvm_x86_ops.nested_ops->check_events(vcpu);
if (r != 0)
return r;
}
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
if (is_guest_mode(vcpu)) {
- r = kvm_x86_ops.check_nested_events(vcpu);
+ r = kvm_x86_ops.nested_ops->check_events(vcpu);
if (r != 0)
return r;
}
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
- if (unlikely(!kvm_x86_ops.get_vmcs12_pages(vcpu))) {
+ if (unlikely(!kvm_x86_ops.nested_ops->get_vmcs12_pages(vcpu))) {
r = 0;
goto out;
}
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu))
- kvm_x86_ops.check_nested_events(vcpu);
+ kvm_x86_ops.nested_ops->check_events(vcpu);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);