#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
+#define TLB_IS_DIRTY(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
				TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
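/*
 * Illustrative sketch (not part of the patch): the match macros above are
 * meant to be combined when scanning the guest TLB, in the style of
 * kvm_mips_guest_tlb_lookup(). An entry hits when its VPN2 matches the
 * mask-adjusted virtual address and it is either global or its ASID agrees
 * with the given EntryHi value.
 */
static inline int example_guest_tlb_lookup(struct kvm_vcpu *vcpu,
					   unsigned long entryhi)
{
	int i;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];

		if (TLB_HI_VPN2_HIT(*tlb, entryhi) &&
		    TLB_HI_ASID_HIT(*tlb, entryhi))
			return i;	/* index of the matching entry */
	}

	return -1;			/* no match */
}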
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
bool user);
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
+
+enum kvm_mips_fault_result {
+ KVM_MIPS_MAPPED = 0,
+ KVM_MIPS_GVA,
+ KVM_MIPS_GPA,
+ KVM_MIPS_TLB,
+ KVM_MIPS_TLBINV,
+ KVM_MIPS_TLBMOD,
+};
+
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+ unsigned long gva,
+ bool write);
/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
local_irq_restore(flags);
}

+/**
+ * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
+ * @vcpu: Virtual CPU.
+ * @gva: Guest virtual address to be accessed.
+ * @write: True if write attempted (must be dirtied and made writable).
+ *
+ * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
+ * dirtying the page if @write so that guest instructions can be modified.
+ *
+ * Returns: KVM_MIPS_MAPPED on success.
+ * KVM_MIPS_GVA if bad guest virtual address.
+ * KVM_MIPS_GPA if bad guest physical address.
+ * KVM_MIPS_TLB if guest TLB not present.
+ * KVM_MIPS_TLBINV if guest TLB present but not valid.
+ * KVM_MIPS_TLBMOD if guest TLB read only.
+ */
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+ unsigned long gva,
+ bool write)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb *tlb;
+ int index;
+
+ if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
+ if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu) < 0)
+ return KVM_MIPS_GPA;
+ } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
+ /* Address should be in the guest TLB */
+ index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
+ if (index < 0)
+ return KVM_MIPS_TLB;
+ tlb = &vcpu->arch.guest_tlb[index];
+
+ /* Entry should be valid, and dirty for writes */
+ if (!TLB_IS_VALID(*tlb, gva))
+ return KVM_MIPS_TLBINV;
+ if (write && !TLB_IS_DIRTY(*tlb, gva))
+ return KVM_MIPS_TLBMOD;
+
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva))
+ return KVM_MIPS_GPA;
+ } else {
+ return KVM_MIPS_GVA;
+ }
+
+ return KVM_MIPS_MAPPED;
+}
+
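/*
 * Illustrative sketch (hypothetical caller, not part of the patch): an
 * emulation path can dispatch on the returned kvm_mips_fault_result to
 * decide whether the access can simply be retried or a guest exception
 * should be delivered instead.
 */
static int example_handle_gva_fault(struct kvm_vcpu *vcpu, unsigned long gva,
				    bool write)
{
	switch (kvm_trap_emul_gva_fault(vcpu, gva, write)) {
	case KVM_MIPS_MAPPED:
		return 0;	/* mapping created/dirtied, retry the access */
	case KVM_MIPS_TLB:
		/* no guest TLB entry: deliver a TLB refill exception */
		break;
	case KVM_MIPS_TLBINV:
		/* entry present but invalid: deliver a TLB invalid exception */
		break;
	case KVM_MIPS_TLBMOD:
		/* read-only entry on a write: deliver a TLB modified exception */
		break;
	case KVM_MIPS_GVA:
	case KVM_MIPS_GPA:
	default:
		/* bad virtual/physical address: report an error to the caller */
		break;
	}

	return -EFAULT;
}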
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
int err;
}
}

+/**
+ * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
+ * @vcpu: VCPU pointer.
+ *
+ * Call before a GVA space access outside of guest mode, to ensure that
+ * asynchronous TLB flush requests are handled or delayed until completion of
+ * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
+ *
+ * Should be called with IRQs already enabled.
+ */
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
+{
+ /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
+ WARN_ON_ONCE(irqs_disabled());
+
+ /*
+ * The caller is about to access the GVA space, so we set the mode to
+ * force TLB flush requests to send an IPI, and also disable IRQs to
+ * delay IPI handling until kvm_trap_emul_gva_lockless_end().
+ */
+ local_irq_disable();
+
+ /*
+ * Make sure the read of VCPU requests is not reordered ahead of the
+ * write to vcpu->mode, or we could miss a TLB flush request while
+ * the requester sees the VCPU as outside of guest mode and not needing
+ * an IPI.
+ */
+ smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+
+ /*
+ * If a TLB flush has been requested (potentially while
+ * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
+ * before accessing the GVA space, and be sure to reload the ASID if
+ * necessary as it'll be immediately used.
+ *
+ * TLB flush requests after this check will trigger an IPI due to the
+ * mode change above, which will be delayed due to IRQs disabled.
+ */
+ kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
+}
+
+/**
+ * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
+ * @vcpu: VCPU pointer.
+ *
+ * Called after a GVA space access outside of guest mode. Should have a matching
+ * call to kvm_trap_emul_gva_lockless_begin().
+ */
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Make sure the write to vcpu->mode is not reordered in front of GVA
+ * accesses, or a TLB flush requester may not think it necessary to send
+ * an IPI.
+ */
+ smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+
+ /*
+ * Now that the access to GVA space is complete, it's safe for pending
+ * TLB flush request IPIs to be handled (which indicates completion).
+ */
+ local_irq_enable();
+}
+
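/*
 * Illustrative sketch (in the style of kvm_get_inst() above, assuming the
 * GVA is a valid userspace pointer): a lockless GVA access brackets the
 * memory access with the begin/end helpers, then falls back to
 * kvm_trap_emul_gva_fault() and retries if the access faults, e.g. because
 * it raced with an asynchronous GVA invalidation.
 */
static int example_read_guest_word(struct kvm_vcpu *vcpu, u32 __user *gva,
				   u32 *out)
{
	int err;

retry:
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, gva);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/* Map the GVA page (or diagnose the fault) and try again */
		if (kvm_trap_emul_gva_fault(vcpu, (unsigned long)gva,
					    false) != KVM_MIPS_MAPPED)
			return -EFAULT;
		goto retry;
	}

	return 0;
}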
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
struct kvm_vcpu *vcpu)
{