KVM: MMU: Unify direct map 4K and large page paths
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3da2508..3ee856f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,12 +135,7 @@ module_param(dbg, bool, 0644);
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
-struct kvm_pv_mmu_op_buffer {
-       void *ptr;
-       unsigned len;
-       unsigned processed;
-       char buf[512] __aligned(sizeof(long));
-};
+#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
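The buffer struct removed here is not dropped: the kvm_pv_mmu_op() hunks at the end of this patch switch to vcpu->arch.mmu_op_buffer, so the definition presumably moves into the arch header alongside struct kvm_vcpu_arch. A sketch of the assumed relocation (not shown in this diff):

        /* Assumed new home of the buffer, embedded per-vcpu instead of being
         * allocated on the stack; the field name mmu_op_buffer is taken from
         * the kvm_pv_mmu_op() hunk below. */
        struct kvm_pv_mmu_op_buffer {
                void *ptr;
                unsigned len;
                unsigned processed;
                char buf[512] __aligned(sizeof(long));
        };

        struct kvm_vcpu_arch {
                /* ... existing fields ... */
                struct kvm_pv_mmu_op_buffer mmu_op_buffer;
        };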
@@ -955,7 +950,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                rmap_remove(kvm, &pt[i]);
                        pt[i] = shadow_trap_nonpresent_pte;
                }
-               kvm_flush_remote_tlbs(kvm);
                return;
        }
 
@@ -974,7 +968,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                }
                pt[i] = shadow_trap_nonpresent_pte;
        }
-       kvm_flush_remote_tlbs(kvm);
 }
 
 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
@@ -991,11 +984,10 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
                        kvm->vcpus[i]->arch.last_pte_updated = NULL;
 }
 
-static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        u64 *parent_pte;
 
-       ++kvm->stat.mmu_shadow_zapped;
        while (sp->multimapped || sp->parent_pte) {
                if (!sp->multimapped)
                        parent_pte = sp->parent_pte;
@@ -1010,19 +1002,23 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
                kvm_mmu_put_page(sp, parent_pte);
                set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
        }
+}
+
+static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       ++kvm->stat.mmu_shadow_zapped;
        kvm_mmu_page_unlink_children(kvm, sp);
+       kvm_mmu_unlink_parents(kvm, sp);
+       kvm_flush_remote_tlbs(kvm);
+       if (!sp->role.invalid && !sp->role.metaphysical)
+               unaccount_shadowed(kvm, sp->gfn);
        if (!sp->root_count) {
-               if (!sp->role.metaphysical && !sp->role.invalid)
-                       unaccount_shadowed(kvm, sp->gfn);
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
-               int invalid = sp->role.invalid;
-               list_move(&sp->link, &kvm->arch.active_mmu_pages);
                sp->role.invalid = 1;
+               list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
-               if (!sp->role.metaphysical && !invalid)
-                       unaccount_shadowed(kvm, sp->gfn);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
 }
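The two hunks above split the parent-unlink loop into kvm_mmu_unlink_parents() and have kvm_mmu_zap_page() issue a single kvm_flush_remote_tlbs(), replacing the per-call flushes removed from kvm_mmu_page_unlink_children() earlier in this patch; unaccount_shadowed() now runs once, before the root_count check, instead of in both branches. For reference, kvm_mmu_zap_page() as it reads after this hunk (reconstructed from the post-image above):

        static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        {
                ++kvm->stat.mmu_shadow_zapped;
                kvm_mmu_page_unlink_children(kvm, sp);
                kvm_mmu_unlink_parents(kvm, sp);
                /* one flush for the whole page, instead of one per unlink pass */
                kvm_flush_remote_tlbs(kvm);
                if (!sp->role.invalid && !sp->role.metaphysical)
                        unaccount_shadowed(kvm, sp->gfn);
                if (!sp->root_count) {
                        hlist_del(&sp->hash_link);
                        kvm_mmu_free_page(kvm, sp);
                } else {
                        /* still referenced by a root: keep it, mark it invalid */
                        sp->role.invalid = 1;
                        list_move(&sp->link, &kvm->arch.active_mmu_pages);
                        kvm_reload_remote_mmus(kvm);
                }
                kvm_mmu_reset_last_pte_updated(kvm);
        }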
@@ -1244,15 +1240,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);
 
-               if (level == 1) {
-                       mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-                                    0, write, 1, &pt_write, 0, gfn, pfn, false);
-                       return pt_write;
-               }
-
-               if (largepage && level == 2) {
+               if (level == 1 || (largepage && level == 2)) {
                        mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-                                    0, write, 1, &pt_write, 1, gfn, pfn, false);
+                                    0, write, 1, &pt_write, largepage,
+                                    gfn, pfn, false);
                        return pt_write;
                }
 
@@ -1837,7 +1828,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-               if (sp->gfn != gfn || sp->role.metaphysical)
+               if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
                        continue;
                pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -2291,18 +2282,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret)
 {
        int r;
-       struct kvm_pv_mmu_op_buffer buffer;
+       struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
 
-       buffer.ptr = buffer.buf;
-       buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
-       buffer.processed = 0;
+       buffer->ptr = buffer->buf;
+       buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
+       buffer->processed = 0;
 
-       r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+       r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
        if (r)
                goto out;
 
-       while (buffer.len) {
-               r = kvm_pv_mmu_op_one(vcpu, &buffer);
+       while (buffer->len) {
+               r = kvm_pv_mmu_op_one(vcpu, buffer);
                if (r < 0)
                        goto out;
                if (r == 0)
@@ -2311,7 +2302,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 
        r = 1;
 out:
-       *ret = buffer.processed;
+       *ret = buffer->processed;
        return r;
 }
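Taken together, the last two hunks make kvm_pv_mmu_op() work on the per-vcpu buffer instead of a local struct with a 512-byte buf on the stack. The resulting function, reconstructed from the post-image (the loop's closing lines, dropped between the two hunks by the web view, are filled in as assumed):

        int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                          gpa_t addr, unsigned long *ret)
        {
                int r;
                struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

                buffer->ptr = buffer->buf;
                buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
                buffer->processed = 0;

                r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
                if (r)
                        goto out;

                while (buffer->len) {
                        r = kvm_pv_mmu_op_one(vcpu, buffer);
                        if (r < 0)
                                goto out;
                        if (r == 0)
                                break;          /* assumed; elided between hunks */
                }

                r = 1;
        out:
                *ret = buffer->processed;
                return r;
        }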