gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
struct gfn_to_hva_cache data;
u64 msr_val;
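+ /* per-vCPU counter used to build tokens for outstanding async faults */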
+ u32 id;
} apf;
};
};
struct kvm_arch_async_pf {
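+ /* value reported to the guest to identify this async page fault */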
+ u32 token;
gfn_t gfn;
};
struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
#endif /* _ASM_X86_KVM_HOST_H */
}
}
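+ /*
+  * Write a 32-bit fault reason into the shared slot the guest registered
+  * through the async PF MSR, using the cached guest mapping.
+  */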
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
+ return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+ sizeof(val));
+}
+
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
- trace_kvm_async_pf_not_present(work->gva);
-
- kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+ trace_kvm_async_pf_not_present(work->arch.token, work->gva);
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
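+ /*
+  * If the guest has not enabled async PF reporting, or the fault was
+  * taken while it was running in kernel mode (CPL 0), halt the vcpu
+  * until the page is available.  Otherwise report the fault: write the
+  * reason into the shared slot and inject a page fault whose address
+  * carries the token.
+  */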
+ if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+ kvm_x86_ops->get_cpl(vcpu) == 0)
+ kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+ else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+ vcpu->arch.fault.error_code = 0;
+ vcpu->arch.fault.address = work->arch.token;
+ kvm_inject_page_fault(vcpu);
+ }
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
- trace_kvm_async_pf_ready(work->gva);
- kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+ trace_kvm_async_pf_ready(work->arch.token, work->gva);
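+ /*
+  * An error page means the fault could not be resolved; report the
+  * broadcast token so the guest wakes every task waiting on async PF.
+  */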
+ if (is_error_page(work->page))
+ work->arch.token = ~0; /* broadcast wakeup */
+ else
+ kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
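+ /*
+  * If the guest enabled async PF reporting, tell it the page is ready
+  * by injecting a page fault that carries the token as the address.
+  */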
+ if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+ !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+ vcpu->arch.fault.error_code = 0;
+ vcpu->arch.fault.address = work->arch.token;
+ kvm_inject_page_fault(vcpu);
+ }
+}
+
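+ /*
+  * 'Page ready' can always be delivered when the guest did not enable
+  * async PF reporting (the vcpu is only woken from halt); otherwise
+  * require that no event is awaiting reinjection and that injection is
+  * currently allowed.
+  */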
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+ return true;
+ else
+ return !kvm_event_needs_reinjection(vcpu) &&
+ kvm_x86_ops->interrupt_allowed(vcpu);
}
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
TRACE_EVENT(
kvm_async_pf_not_present,
- TP_PROTO(u64 gva),
- TP_ARGS(gva),
+ TP_PROTO(u64 token, u64 gva),
+ TP_ARGS(token, gva),
TP_STRUCT__entry(
+ __field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
+ __entry->token = token;
__entry->gva = gva;
),
- TP_printk("gva %#llx not present", __entry->gva)
+ TP_printk("token %#llx gva %#llx not present", __entry->token,
+ __entry->gva)
);
TRACE_EVENT(
kvm_async_pf_ready,
- TP_PROTO(u64 gva),
- TP_ARGS(gva),
+ TP_PROTO(u64 token, u64 gva),
+ TP_ARGS(token, gva),
TP_STRUCT__entry(
+ __field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
+ __entry->token = token;
__entry->gva = gva;
),
- TP_printk("gva %#llx ready", __entry->gva)
+ TP_printk("token %#llx gva %#llx ready", __entry->token, __entry->gva)
);
TRACE_EVENT(