KVM: arm64: pkvm: Unshare guest structs during teardown
author	Quentin Perret <qperret@google.com>	Wed, 15 Dec 2021 16:12:31 +0000
committer	Marc Zyngier <maz@kernel.org>	Thu, 16 Dec 2021 12:58:57 +0000
Make use of the newly introduced unshare hypercall during guest teardown
to unmap guest-related data structures from the hyp stage-1. This
ensures the hypervisor drops its mappings of (and access to) these pages
before the host kernel frees and reuses them.
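
As a hedged illustration of the resulting symmetry (the share side is
not part of this patch; it was introduced earlier in the series, and its
exact call site is assumed here), every range mapped into hyp at setup
time now has a matching unshare on the teardown path, e.g. for the kvm
structure itself:

	/* VM creation, in kvm_arch_init_vm(): */
	ret = kvm_share_hyp(kvm, kvm + 1);

	/* VM teardown, in kvm_arch_destroy_vm(), added by this patch: */
	kvm_unshare_hyp(kvm, kvm + 1);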

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211215161232.1480836-15-qperret@google.com
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/reset.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index cf858a7..9360a28 100644
@@ -321,6 +321,7 @@ struct kvm_vcpu_arch {
        struct kvm_guest_debug_arch external_debug_state;
 
        struct user_fpsimd_state *host_fpsimd_state;    /* hyp VA */
+       struct task_struct *parent_task;
 
        struct {
                /* {Break,watch}point registers */
@@ -737,6 +738,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
 
 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 {
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 185d0f6..81839e9 100644
@@ -151,6 +151,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 #include <asm/stage2_pgtable.h>
 
 int kvm_share_hyp(void *from, void *to);
+void kvm_unshare_hyp(void *from, void *to);
 int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
                           void __iomem **kaddr,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c202abb..6057f3c 100644
@@ -188,6 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                }
        }
        atomic_set(&kvm->online_vcpus, 0);
+
+       kvm_unshare_hyp(kvm, kvm + 1);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 86899d3..2f48fd3 100644
 #include <asm/kvm_mmu.h>
 #include <asm/sysreg.h>
 
+void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
+{
+       struct task_struct *p = vcpu->arch.parent_task;
+       struct user_fpsimd_state *fpsimd;
+
+       if (!is_protected_kvm_enabled() || !p)
+               return;
+
+       fpsimd = &p->thread.uw.fpsimd_state;
+       kvm_unshare_hyp(fpsimd, fpsimd + 1);
+       put_task_struct(p);
+}
+
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
  * once and the most recent prior KVM_RUN for this vcpu was called from
@@ -29,12 +42,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 
        struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
 
+       kvm_vcpu_unshare_task_fp(vcpu);
+
        /* Make sure the host task fpsimd state is visible to hyp: */
        ret = kvm_share_hyp(fpsimd, fpsimd + 1);
-       if (!ret)
-               vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+       if (ret)
+               return ret;
+
+       vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+
+       /*
+        * We need to keep current's task_struct pinned until its data has been
+        * unshared with the hypervisor to make sure it is not re-used by the
+        * kernel and donated to someone else while already shared -- see
+        * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
+        */
+       if (is_protected_kvm_enabled()) {
+               get_task_struct(current);
+               vcpu->arch.parent_task = current;
+       }
 
-       return ret;
+       return 0;
 }
 
 /*
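
A note on the pinning scheme above (editorial sketch, not from the
patch): the get_task_struct()/put_task_struct() pair enforces the
ordering below, without which the exiting task's fpsimd page could be
freed and recycled by the allocator while hyp still tracks it as
shared:

	fpsimd = &current->thread.uw.fpsimd_state;
	kvm_share_hyp(fpsimd, fpsimd + 1);	/* KVM_RUN: page shared with hyp  */
	get_task_struct(current);		/* pin the task_struct            */
	/* ... the vcpu thread may exit at any point here ... */
	kvm_unshare_hyp(fpsimd, fpsimd + 1);	/* vcpu teardown: unshare         */
	put_task_struct(p);			/* only now may the task be freed */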
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index cf6c52f..b1edcc9 100644
@@ -341,6 +341,32 @@ unlock:
        return ret;
 }
 
+static int unshare_pfn_hyp(u64 pfn)
+{
+       struct rb_node **node, *parent;
+       struct hyp_shared_pfn *this;
+       int ret = 0;
+
+       mutex_lock(&hyp_shared_pfns_lock);
+       this = find_shared_pfn(pfn, &node, &parent);
+       if (WARN_ON(!this)) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+
+       this->count--;
+       if (this->count)
+               goto unlock;
+
+       rb_erase(&this->node, &hyp_shared_pfns);
+       kfree(this);
+       ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
+unlock:
+       mutex_unlock(&hyp_shared_pfns_lock);
+
+       return ret;
+}
+
 int kvm_share_hyp(void *from, void *to)
 {
        phys_addr_t start, end, cur;
@@ -373,6 +399,22 @@ int kvm_share_hyp(void *from, void *to)
        return 0;
 }
 
+void kvm_unshare_hyp(void *from, void *to)
+{
+       phys_addr_t start, end, cur;
+       u64 pfn;
+
+       if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
+               return;
+
+       start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+       end = PAGE_ALIGN(__pa(to));
+       for (cur = start; cur < end; cur += PAGE_SIZE) {
+               pfn = __phys_to_pfn(cur);
+               WARN_ON(unshare_pfn_hyp(pfn));
+       }
+}
+
 /**
  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:      The virtual kernel start address of the range
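
Illustrative only (these calls do not appear in the patch, and
__pkvm_host_share_hyp is assumed to be the share-side counterpart of
the unshare hypercall above): since shares are refcounted per pfn in
the hyp_shared_pfns rb-tree, two objects on the same page trigger one
hypercall in each direction -- on the first share and the last unshare:

	kvm_share_hyp(a, a + 1);	/* count 0->1: __pkvm_host_share_hyp */
	kvm_share_hyp(b, b + 1);	/* same page, 1->2: no hypercall     */
	kvm_unshare_hyp(a, a + 1);	/* 2->1: no hypercall                */
	kvm_unshare_hyp(b, b + 1);	/* 1->0: __pkvm_host_unshare_hyp     */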
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e3e2a79..798a84e 100644
@@ -150,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-       kfree(vcpu->arch.sve_state);
+       void *sve_state = vcpu->arch.sve_state;
+
+       kvm_vcpu_unshare_task_fp(vcpu);
+       kvm_unshare_hyp(vcpu, vcpu + 1);
+       if (sve_state)
+               kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
+       kfree(sve_state);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
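
A closing editorial note: the ordering in kvm_arm_vcpu_destroy() is
deliberate -- each buffer is unshared from hyp strictly before it is
freed, so the allocator can never hand out a page the hypervisor still
maps. Note also that sve_state is a void *, so the range end relies on
the kernel's GCC-style void pointer arithmetic:

	kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);	/* safe: the hyp mapping is already gone */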