KVM: SVM: Move SEV code to separate file
author Joerg Roedel <jroedel@suse.de>
Tue, 24 Mar 2020 09:41:54 +0000 (10:41 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 3 Apr 2020 14:53:56 +0000 (10:53 -0400)
Move the SEV-specific parts of svm.c into the new sev.c file.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Message-Id: <20200324094154.32352-5-joro@8bytes.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/Makefile
arch/x86/kvm/svm/sev.c [new file with mode: 0644]
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
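
The svm/svm.h hunk is not reproduced in this excerpt. As a rough sketch only (assumed from the symbols this patch makes non-static below, not copied from the actual hunk), the header would gain the moved struct svm_cpu_data definition plus declarations along these lines:

/* Sketch (assumed): SEV interface that svm/sev.c exposes to svm/svm.c. */
extern unsigned int max_sev_asid;
DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_vm_destroy(struct kvm *kvm);

The inline helpers svm_sev_enabled() and sev_guest(), removed from svm.c below, would likewise move into the header so that both files can use them.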

index 31aca07d8f12d2c007ae373d15e2a6e115fd55f6..e5a71aa0967bdeb5bebfc86b14bd3e56146d1c28 100644
@@ -14,7 +14,7 @@ kvm-y                 += x86.o emulate.o i8259.o irq.o lapic.o \
                           hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o
 
 kvm-intel-y            += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o
-kvm-amd-y              += svm/svm.o svm/pmu.o svm/nested.o svm/avic.o
+kvm-amd-y              += svm/svm.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
 
 obj-$(CONFIG_KVM)      += kvm.o
 obj-$(CONFIG_KVM_INTEL)        += kvm-intel.o
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
new file mode 100644
index 0000000..0e3fc31
--- /dev/null
@@ -0,0 +1,1187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * AMD SVM-SEV support
+ *
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#include <linux/psp-sev.h>
+#include <linux/swap.h>
+
+#include "x86.h"
+#include "svm.h"
+
+static int sev_flush_asids(void);
+static DECLARE_RWSEM(sev_deactivate_lock);
+static DEFINE_MUTEX(sev_bitmap_lock);
+unsigned int max_sev_asid;
+static unsigned int min_sev_asid;
+static unsigned long *sev_asid_bitmap;
+static unsigned long *sev_reclaim_asid_bitmap;
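+/*
+ * __sme_page_pa(): physical address of a struct page with the SME/SEV
+ * encryption bit (the C-bit, sme_me_mask) OR'ed in by __sme_set().
+ */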
+#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+
+struct enc_region {
+       struct list_head list;
+       unsigned long npages;
+       struct page **pages;
+       unsigned long uaddr;
+       unsigned long size;
+};
+
+static int sev_flush_asids(void)
+{
+       int ret, error = 0;
+
+       /*
+        * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
+        * so it must be guarded.
+        */
+       down_write(&sev_deactivate_lock);
+
+       wbinvd_on_all_cpus();
+       ret = sev_guest_df_flush(&error);
+
+       up_write(&sev_deactivate_lock);
+
+       if (ret)
+               pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
+
+       return ret;
+}
+
+/* Must be called with the sev_bitmap_lock held */
+static bool __sev_recycle_asids(void)
+{
+       int pos;
+
+       /* Check if there are any ASIDs to reclaim before performing a flush */
+       pos = find_next_bit(sev_reclaim_asid_bitmap,
+                           max_sev_asid, min_sev_asid - 1);
+       if (pos >= max_sev_asid)
+               return false;
+
+       if (sev_flush_asids())
+               return false;
+
+       bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
+                  max_sev_asid);
+       bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+
+       return true;
+}
+
+static int sev_asid_new(void)
+{
+       bool retry = true;
+       int pos;
+
+       mutex_lock(&sev_bitmap_lock);
+
+       /*
+        * An SEV-enabled guest must use an ASID from min_sev_asid to max_sev_asid.
+        */
+again:
+       pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
+       if (pos >= max_sev_asid) {
+               if (retry && __sev_recycle_asids()) {
+                       retry = false;
+                       goto again;
+               }
+               mutex_unlock(&sev_bitmap_lock);
+               return -EBUSY;
+       }
+
+       __set_bit(pos, sev_asid_bitmap);
+
+       mutex_unlock(&sev_bitmap_lock);
+
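+       /* Bitmap positions are zero-based, but valid SEV ASIDs start at 1. */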
+       return pos + 1;
+}
+
+static int sev_get_asid(struct kvm *kvm)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       return sev->asid;
+}
+
+static void sev_asid_free(int asid)
+{
+       struct svm_cpu_data *sd;
+       int cpu, pos;
+
+       mutex_lock(&sev_bitmap_lock);
+
+       pos = asid - 1;
+       __set_bit(pos, sev_reclaim_asid_bitmap);
+
+       for_each_possible_cpu(cpu) {
+               sd = per_cpu(svm_data, cpu);
+               sd->sev_vmcbs[pos] = NULL;
+       }
+
+       mutex_unlock(&sev_bitmap_lock);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
+       struct sev_data_decommission *decommission;
+       struct sev_data_deactivate *data;
+
+       if (!handle)
+               return;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return;
+
+       /* deactivate handle */
+       data->handle = handle;
+
+       /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
+       down_read(&sev_deactivate_lock);
+       sev_guest_deactivate(data, NULL);
+       up_read(&sev_deactivate_lock);
+
+       kfree(data);
+
+       decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
+       if (!decommission)
+               return;
+
+       /* decommission handle */
+       decommission->handle = handle;
+       sev_guest_decommission(decommission, NULL);
+
+       kfree(decommission);
+}
+
+static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       int asid, ret;
+
+       ret = -EBUSY;
+       if (unlikely(sev->active))
+               return ret;
+
+       asid = sev_asid_new();
+       if (asid < 0)
+               return ret;
+
+       ret = sev_platform_init(&argp->error);
+       if (ret)
+               goto e_free;
+
+       sev->active = true;
+       sev->asid = asid;
+       INIT_LIST_HEAD(&sev->regions_list);
+
+       return 0;
+
+e_free:
+       sev_asid_free(asid);
+       return ret;
+}
+
+static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+{
+       struct sev_data_activate *data;
+       int asid = sev_get_asid(kvm);
+       int ret;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               return -ENOMEM;
+
+       /* activate ASID on the given handle */
+       data->handle = handle;
+       data->asid   = asid;
+       ret = sev_guest_activate(data, error);
+       kfree(data);
+
+       return ret;
+}
+
+static int __sev_issue_cmd(int fd, int id, void *data, int *error)
+{
+       struct fd f;
+       int ret;
+
+       f = fdget(fd);
+       if (!f.file)
+               return -EBADF;
+
+       ret = sev_issue_cmd_external_user(f.file, id, data, error);
+
+       fdput(f);
+       return ret;
+}
+
+static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       return __sev_issue_cmd(sev->fd, id, data, error);
+}
+
+static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_start *start;
+       struct kvm_sev_launch_start params;
+       void *dh_blob, *session_blob;
+       int *error = &argp->error;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+               return -EFAULT;
+
+       start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
+       if (!start)
+               return -ENOMEM;
+
+       dh_blob = NULL;
+       if (params.dh_uaddr) {
+               dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
+               if (IS_ERR(dh_blob)) {
+                       ret = PTR_ERR(dh_blob);
+                       goto e_free;
+               }
+
+               start->dh_cert_address = __sme_set(__pa(dh_blob));
+               start->dh_cert_len = params.dh_len;
+       }
+
+       session_blob = NULL;
+       if (params.session_uaddr) {
+               session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
+               if (IS_ERR(session_blob)) {
+                       ret = PTR_ERR(session_blob);
+                       goto e_free_dh;
+               }
+
+               start->session_address = __sme_set(__pa(session_blob));
+               start->session_len = params.session_len;
+       }
+
+       start->handle = params.handle;
+       start->policy = params.policy;
+
+       /* create memory encryption context */
+       ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
+       if (ret)
+               goto e_free_session;
+
+       /* Bind ASID to this guest */
+       ret = sev_bind_asid(kvm, start->handle, error);
+       if (ret)
+               goto e_free_session;
+
+       /* return handle to userspace */
+       params.handle = start->handle;
+       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
+               sev_unbind_asid(kvm, start->handle);
+               ret = -EFAULT;
+               goto e_free_session;
+       }
+
+       sev->handle = start->handle;
+       sev->fd = argp->sev_fd;
+
+e_free_session:
+       kfree(session_blob);
+e_free_dh:
+       kfree(dh_blob);
+e_free:
+       kfree(start);
+       return ret;
+}
+
+static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+                                   unsigned long ulen, unsigned long *n,
+                                   int write)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       unsigned long npages, npinned, size;
+       unsigned long locked, lock_limit;
+       struct page **pages;
+       unsigned long first, last;
+
+       if (ulen == 0 || uaddr + ulen < uaddr)
+               return NULL;
+
+       /* Calculate number of pages. */
+       first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
+       last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
+       npages = (last - first + 1);
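+       /*
+        * Worked example (hypothetical values): with 4 KiB pages, uaddr=0x1234
+        * and ulen=0x2000 give first=1, last=3 and thus npages=3.
+        */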
+
+       locked = sev->pages_locked + npages;
+       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+       if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+               pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
+               return NULL;
+       }
+
+       /* Avoid using vmalloc for smaller buffers. */
+       size = npages * sizeof(struct page *);
+       if (size > PAGE_SIZE)
+               pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+                                 PAGE_KERNEL);
+       else
+               pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
+
+       if (!pages)
+               return NULL;
+
+       /* Pin the user virtual address. */
+       npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
+       if (npinned != npages) {
+               pr_err("SEV: Failure locking %lu pages.\n", npages);
+               goto err;
+       }
+
+       *n = npages;
+       sev->pages_locked = locked;
+
+       return pages;
+
+err:
+       if (npinned > 0)
+               release_pages(pages, npinned);
+
+       kvfree(pages);
+       return NULL;
+}
+
+static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+                            unsigned long npages)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       release_pages(pages, npages);
+       kvfree(pages);
+       sev->pages_locked -= npages;
+}
+
+static void sev_clflush_pages(struct page *pages[], unsigned long npages)
+{
+       uint8_t *page_virtual;
+       unsigned long i;
+
+       if (npages == 0 || pages == NULL)
+               return;
+
+       for (i = 0; i < npages; i++) {
+               page_virtual = kmap_atomic(pages[i]);
+               clflush_cache_range(page_virtual, PAGE_SIZE);
+               kunmap_atomic(page_virtual);
+       }
+}
+
+static unsigned long get_num_contig_pages(unsigned long idx,
+                               struct page **inpages, unsigned long npages)
+{
+       unsigned long paddr, next_paddr;
+       unsigned long i = idx + 1, pages = 1;
+
+       /* find the number of contiguous pages starting from idx */
+       paddr = __sme_page_pa(inpages[idx]);
+       while (i < npages) {
+               next_paddr = __sme_page_pa(inpages[i++]);
+               if ((paddr + PAGE_SIZE) == next_paddr) {
+                       pages++;
+                       paddr = next_paddr;
+                       continue;
+               }
+               break;
+       }
+
+       return pages;
+}
+
+static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_launch_update_data params;
+       struct sev_data_launch_update_data *data;
+       struct page **inpages;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+               return -EFAULT;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               return -ENOMEM;
+
+       vaddr = params.uaddr;
+       size = params.len;
+       vaddr_end = vaddr + size;
+
+       /* Lock the user memory. */
+       inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+       if (!inpages) {
+               ret = -ENOMEM;
+               goto e_free;
+       }
+
+       /*
+        * The LAUNCH_UPDATE command will perform in-place encryption of the
+        * memory content (i.e. it will write the same memory region with C=1).
+        * It's possible that the cache may contain the data with C=0, i.e.,
+        * unencrypted, so invalidate it first.
+        */
+       sev_clflush_pages(inpages, npages);
+
+       for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
+               int offset, len;
+
+               /*
+                * If the user buffer is not page-aligned, calculate the offset
+                * within the page.
+                */
+               offset = vaddr & (PAGE_SIZE - 1);
+
+               /* Calculate the number of pages that can be encrypted in one go. */
+               pages = get_num_contig_pages(i, inpages, npages);
+
+               len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
+
+               data->handle = sev->handle;
+               data->len = len;
+               data->address = __sme_page_pa(inpages[i]) + offset;
+               ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
+               if (ret)
+                       goto e_unpin;
+
+               size -= len;
+               next_vaddr = vaddr + len;
+       }
+
+e_unpin:
+       /* content of memory is updated, mark pages dirty */
+       for (i = 0; i < npages; i++) {
+               set_page_dirty_lock(inpages[i]);
+               mark_page_accessed(inpages[i]);
+       }
+       /* unlock the user pages */
+       sev_unpin_memory(kvm, inpages, npages);
+e_free:
+       kfree(data);
+       return ret;
+}
+
+static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       void __user *measure = (void __user *)(uintptr_t)argp->data;
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_measure *data;
+       struct kvm_sev_launch_measure params;
+       void __user *p = NULL;
+       void *blob = NULL;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, measure, sizeof(params)))
+               return -EFAULT;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               return -ENOMEM;
+
+       /* User wants to query the blob length */
+       if (!params.len)
+               goto cmd;
+
+       p = (void __user *)(uintptr_t)params.uaddr;
+       if (p) {
+               if (params.len > SEV_FW_BLOB_MAX_SIZE) {
+                       ret = -EINVAL;
+                       goto e_free;
+               }
+
+               ret = -ENOMEM;
+               blob = kmalloc(params.len, GFP_KERNEL);
+               if (!blob)
+                       goto e_free;
+
+               data->address = __psp_pa(blob);
+               data->len = params.len;
+       }
+
+cmd:
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
+
+       /*
+        * If userspace only queried the measurement length, the FW response
+        * already carries the expected data (the length), so just copy it back.
+        */
+       if (!params.len)
+               goto done;
+
+       if (ret)
+               goto e_free_blob;
+
+       if (blob) {
+               if (copy_to_user(p, blob, params.len))
+                       ret = -EFAULT;
+       }
+
+done:
+       params.len = data->len;
+       if (copy_to_user(measure, &params, sizeof(params)))
+               ret = -EFAULT;
+e_free_blob:
+       kfree(blob);
+e_free:
+       kfree(data);
+       return ret;
+}
+
+static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_finish *data;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               return -ENOMEM;
+
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
+
+       kfree(data);
+       return ret;
+}
+
+static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_guest_status params;
+       struct sev_data_guest_status *data;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               return -ENOMEM;
+
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
+       if (ret)
+               goto e_free;
+
+       params.policy = data->policy;
+       params.state = data->state;
+       params.handle = data->handle;
+
+       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+               ret = -EFAULT;
+e_free:
+       kfree(data);
+       return ret;
+}
+
+static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+                              unsigned long dst, int size,
+                              int *error, bool enc)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_dbg *data;
+       int ret;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               return -ENOMEM;
+
+       data->handle = sev->handle;
+       data->dst_addr = dst;
+       data->src_addr = src;
+       data->len = size;
+
+       ret = sev_issue_cmd(kvm,
+                           enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+                           data, error);
+       kfree(data);
+       return ret;
+}
+
+static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
+                            unsigned long dst_paddr, int sz, int *err)
+{
+       int offset;
+
+       /*
+        * It's safe to read more than we are asked for; the caller should
+        * ensure that the destination has enough space.
+        */
+       src_paddr = round_down(src_paddr, 16);
+       offset = src_paddr & 15;
+       sz = round_up(sz + offset, 16);
+
+       return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
+}
+
+static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+                                 unsigned long __user dst_uaddr,
+                                 unsigned long dst_paddr,
+                                 int size, int *err)
+{
+       struct page *tpage = NULL;
+       int ret, offset;
+
+       /* If the inputs are not 16-byte aligned then use an intermediate buffer */
+       if (!IS_ALIGNED(dst_paddr, 16) ||
+           !IS_ALIGNED(paddr,     16) ||
+           !IS_ALIGNED(size,      16)) {
+               tpage = (void *)alloc_page(GFP_KERNEL);
+               if (!tpage)
+                       return -ENOMEM;
+
+               dst_paddr = __sme_page_pa(tpage);
+       }
+
+       ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
+       if (ret)
+               goto e_free;
+
+       if (tpage) {
+               offset = paddr & 15;
+               if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
+                                page_address(tpage) + offset, size))
+                       ret = -EFAULT;
+       }
+
+e_free:
+       if (tpage)
+               __free_page(tpage);
+
+       return ret;
+}
+
+static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
+                                 unsigned long __user vaddr,
+                                 unsigned long dst_paddr,
+                                 unsigned long __user dst_vaddr,
+                                 int size, int *error)
+{
+       struct page *src_tpage = NULL;
+       struct page *dst_tpage = NULL;
+       int ret, len = size;
+
+       /* If source buffer is not aligned then use an intermediate buffer */
+       if (!IS_ALIGNED(vaddr, 16)) {
+               src_tpage = alloc_page(GFP_KERNEL);
+               if (!src_tpage)
+                       return -ENOMEM;
+
+               if (copy_from_user(page_address(src_tpage),
+                               (void __user *)(uintptr_t)vaddr, size)) {
+                       __free_page(src_tpage);
+                       return -EFAULT;
+               }
+
+               paddr = __sme_page_pa(src_tpage);
+       }
+
+       /*
+        *  If destination buffer or length is not aligned then do read-modify-write:
+        *   - decrypt destination in an intermediate buffer
+        *   - copy the source buffer in an intermediate buffer
+        *   - use the intermediate buffer as source buffer
+        */
+       if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+               int dst_offset;
+
+               dst_tpage = alloc_page(GFP_KERNEL);
+               if (!dst_tpage) {
+                       ret = -ENOMEM;
+                       goto e_free;
+               }
+
+               ret = __sev_dbg_decrypt(kvm, dst_paddr,
+                                       __sme_page_pa(dst_tpage), size, error);
+               if (ret)
+                       goto e_free;
+
+               /*
+                *  If source is kernel buffer then use memcpy() otherwise
+                *  copy_from_user().
+                */
+               dst_offset = dst_paddr & 15;
+
+               if (src_tpage)
+                       memcpy(page_address(dst_tpage) + dst_offset,
+                              page_address(src_tpage), size);
+               else {
+                       if (copy_from_user(page_address(dst_tpage) + dst_offset,
+                                          (void __user *)(uintptr_t)vaddr, size)) {
+                               ret = -EFAULT;
+                               goto e_free;
+                       }
+               }
+
+               paddr = __sme_page_pa(dst_tpage);
+               dst_paddr = round_down(dst_paddr, 16);
+               len = round_up(size, 16);
+       }
+
+       ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
+
+e_free:
+       if (src_tpage)
+               __free_page(src_tpage);
+       if (dst_tpage)
+               __free_page(dst_tpage);
+       return ret;
+}
+
+static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+{
+       unsigned long vaddr, vaddr_end, next_vaddr;
+       unsigned long dst_vaddr;
+       struct page **src_p, **dst_p;
+       struct kvm_sev_dbg debug;
+       unsigned long n;
+       unsigned int size;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
+               return -EFAULT;
+
+       if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
+               return -EINVAL;
+       if (!debug.dst_uaddr)
+               return -EINVAL;
+
+       vaddr = debug.src_uaddr;
+       size = debug.len;
+       vaddr_end = vaddr + size;
+       dst_vaddr = debug.dst_uaddr;
+
+       for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+               int len, s_off, d_off;
+
+               /* lock userspace source and destination page */
+               src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+               if (!src_p)
+                       return -EFAULT;
+
+               dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+               if (!dst_p) {
+                       sev_unpin_memory(kvm, src_p, n);
+                       return -EFAULT;
+               }
+
+               /*
+                * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
+                * memory content (i.e. it will write the same memory region with C=1).
+                * It's possible that the cache may contain the data with C=0, i.e.,
+                * unencrypted, so invalidate it first.
+                */
+               sev_clflush_pages(src_p, 1);
+               sev_clflush_pages(dst_p, 1);
+
+               /*
+                * Since user buffer may not be page aligned, calculate the
+                * offset within the page.
+                */
+               s_off = vaddr & ~PAGE_MASK;
+               d_off = dst_vaddr & ~PAGE_MASK;
+               len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+               if (dec)
+                       ret = __sev_dbg_decrypt_user(kvm,
+                                                    __sme_page_pa(src_p[0]) + s_off,
+                                                    dst_vaddr,
+                                                    __sme_page_pa(dst_p[0]) + d_off,
+                                                    len, &argp->error);
+               else
+                       ret = __sev_dbg_encrypt_user(kvm,
+                                                    __sme_page_pa(src_p[0]) + s_off,
+                                                    vaddr,
+                                                    __sme_page_pa(dst_p[0]) + d_off,
+                                                    dst_vaddr,
+                                                    len, &argp->error);
+
+               sev_unpin_memory(kvm, src_p, n);
+               sev_unpin_memory(kvm, dst_p, n);
+
+               if (ret)
+                       goto err;
+
+               next_vaddr = vaddr + len;
+               dst_vaddr = dst_vaddr + len;
+               size -= len;
+       }
+err:
+       return ret;
+}
+
+static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_secret *data;
+       struct kvm_sev_launch_secret params;
+       struct page **pages;
+       void *blob, *hdr;
+       unsigned long n;
+       int ret, offset;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+               return -EFAULT;
+
+       pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
+       if (!pages)
+               return -ENOMEM;
+
+       /*
+        * The secret must be copied into a contiguous memory region; let's verify
+        * that the userspace memory pages are contiguous before we issue the command.
+        */
+       if (get_num_contig_pages(0, pages, n) != n) {
+               ret = -EINVAL;
+               goto e_unpin_memory;
+       }
+
+       ret = -ENOMEM;
+       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+       if (!data)
+               goto e_unpin_memory;
+
+       offset = params.guest_uaddr & (PAGE_SIZE - 1);
+       data->guest_address = __sme_page_pa(pages[0]) + offset;
+       data->guest_len = params.guest_len;
+
+       blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
+       if (IS_ERR(blob)) {
+               ret = PTR_ERR(blob);
+               goto e_free;
+       }
+
+       data->trans_address = __psp_pa(blob);
+       data->trans_len = params.trans_len;
+
+       hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+       if (IS_ERR(hdr)) {
+               ret = PTR_ERR(hdr);
+               goto e_free_blob;
+       }
+       data->hdr_address = __psp_pa(hdr);
+       data->hdr_len = params.hdr_len;
+
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
+
+       kfree(hdr);
+
+e_free_blob:
+       kfree(blob);
+e_free:
+       kfree(data);
+e_unpin_memory:
+       sev_unpin_memory(kvm, pages, n);
+       return ret;
+}
+
+int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+{
+       struct kvm_sev_cmd sev_cmd;
+       int r;
+
+       if (!svm_sev_enabled())
+               return -ENOTTY;
+
+       if (!argp)
+               return 0;
+
+       if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
+               return -EFAULT;
+
+       mutex_lock(&kvm->lock);
+
+       switch (sev_cmd.id) {
+       case KVM_SEV_INIT:
+               r = sev_guest_init(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_START:
+               r = sev_launch_start(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_UPDATE_DATA:
+               r = sev_launch_update_data(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_MEASURE:
+               r = sev_launch_measure(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_FINISH:
+               r = sev_launch_finish(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_GUEST_STATUS:
+               r = sev_guest_status(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_DBG_DECRYPT:
+               r = sev_dbg_crypt(kvm, &sev_cmd, true);
+               break;
+       case KVM_SEV_DBG_ENCRYPT:
+               r = sev_dbg_crypt(kvm, &sev_cmd, false);
+               break;
+       case KVM_SEV_LAUNCH_SECRET:
+               r = sev_launch_secret(kvm, &sev_cmd);
+               break;
+       default:
+               r = -EINVAL;
+               goto out;
+       }
+
+       if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
+               r = -EFAULT;
+
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
+int svm_register_enc_region(struct kvm *kvm,
+                           struct kvm_enc_region *range)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct enc_region *region;
+       int ret = 0;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
+               return -EINVAL;
+
+       region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
+       if (!region)
+               return -ENOMEM;
+
+       region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+       if (!region->pages) {
+               ret = -ENOMEM;
+               goto e_free;
+       }
+
+       /*
+        * The guest may change the memory encryption attribute from C=0 -> C=1
+        * or vice versa for this memory range. Let's make sure caches are
+        * flushed to ensure that guest data gets written into memory with the
+        * correct C-bit.
+        */
+       sev_clflush_pages(region->pages, region->npages);
+
+       region->uaddr = range->addr;
+       region->size = range->size;
+
+       mutex_lock(&kvm->lock);
+       list_add_tail(&region->list, &sev->regions_list);
+       mutex_unlock(&kvm->lock);
+
+       return ret;
+
+e_free:
+       kfree(region);
+       return ret;
+}
+
+static struct enc_region *
+find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct list_head *head = &sev->regions_list;
+       struct enc_region *i;
+
+       list_for_each_entry(i, head, list) {
+               if (i->uaddr == range->addr &&
+                   i->size == range->size)
+                       return i;
+       }
+
+       return NULL;
+}
+
+static void __unregister_enc_region_locked(struct kvm *kvm,
+                                          struct enc_region *region)
+{
+       sev_unpin_memory(kvm, region->pages, region->npages);
+       list_del(&region->list);
+       kfree(region);
+}
+
+int svm_unregister_enc_region(struct kvm *kvm,
+                             struct kvm_enc_region *range)
+{
+       struct enc_region *region;
+       int ret;
+
+       mutex_lock(&kvm->lock);
+
+       if (!sev_guest(kvm)) {
+               ret = -ENOTTY;
+               goto failed;
+       }
+
+       region = find_enc_region(kvm, range);
+       if (!region) {
+               ret = -EINVAL;
+               goto failed;
+       }
+
+       /*
+        * Ensure that all guest tagged cache entries are flushed before
+        * releasing the pages back to the system for use. CLFLUSH will
+        * not do this, so issue a WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
+       __unregister_enc_region_locked(kvm, region);
+
+       mutex_unlock(&kvm->lock);
+       return 0;
+
+failed:
+       mutex_unlock(&kvm->lock);
+       return ret;
+}
+
+void sev_vm_destroy(struct kvm *kvm)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct list_head *head = &sev->regions_list;
+       struct list_head *pos, *q;
+
+       if (!sev_guest(kvm))
+               return;
+
+       mutex_lock(&kvm->lock);
+
+       /*
+        * Ensure that all guest tagged cache entries are flushed before
+        * releasing the pages back to the system for use. CLFLUSH will
+        * not do this, so issue a WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
+       /*
+        * If userspace was terminated before unregistering the memory regions,
+        * then let's unpin all the registered memory.
+        */
+       if (!list_empty(head)) {
+               list_for_each_safe(pos, q, head) {
+                       __unregister_enc_region_locked(kvm,
+                               list_entry(pos, struct enc_region, list));
+               }
+       }
+
+       mutex_unlock(&kvm->lock);
+
+       sev_unbind_asid(kvm, sev->handle);
+       sev_asid_free(sev->asid);
+}
+
+int __init sev_hardware_setup(void)
+{
+       struct sev_user_data_status *status;
+       int rc;
+
+       /* Maximum number of encrypted guests supported simultaneously */
+       max_sev_asid = cpuid_ecx(0x8000001F);
+
+       if (!max_sev_asid)
+               return 1;
+
+       /* Minimum ASID value that should be used for SEV guest */
+       min_sev_asid = cpuid_edx(0x8000001F);
+
+       /* Initialize SEV ASID bitmaps */
+       sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       if (!sev_asid_bitmap)
+               return 1;
+
+       sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       if (!sev_reclaim_asid_bitmap)
+               return 1;
+
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (!status)
+               return 1;
+
+       /*
+        * Check SEV platform status.
+        *
+        * PLATFORM_STATUS can be called in any state. If we fail to query the
+        * platform status, then either the PSP firmware does not support the
+        * SEV feature or the SEV firmware is dead.
+        */
+       rc = sev_platform_status(status, NULL);
+       if (rc)
+               goto err;
+
+       pr_info("SEV supported\n");
+
+err:
+       kfree(status);
+       return rc;
+}
+
+void sev_hardware_teardown(void)
+{
+       bitmap_free(sev_asid_bitmap);
+       bitmap_free(sev_reclaim_asid_bitmap);
+
+       sev_flush_asids();
+}
+
+void pre_sev_run(struct vcpu_svm *svm, int cpu)
+{
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+       int asid = sev_get_asid(svm->vcpu.kvm);
+
+       /* Assign the ASID allocated to this SEV guest */
+       svm->vmcb->control.asid = asid;
+
+       /*
+        * Flush guest TLB:
+        *
+        * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
+        * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
+        */
+       if (sd->sev_vmcbs[asid] == svm->vmcb &&
+           svm->last_cpu == cpu)
+               return;
+
+       svm->last_cpu = cpu;
+       sd->sev_vmcbs[asid] = svm->vmcb;
+       svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+       mark_dirty(svm->vmcb, VMCB_ASID);
+}
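
For context, userspace reaches the svm_mem_enc_op() dispatcher above through the KVM_MEMORY_ENCRYPT_OP ioctl on the VM file descriptor. Below is a minimal sketch, not part of this patch, assuming the VM fd already exists, sev_fd was opened from /dev/sev, the guest memory was registered beforehand via KVM_MEMORY_ENCRYPT_REG_REGION (svm_register_enc_region() above), and error handling plus the LAUNCH_MEASURE step are elided:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sev_ioctl(int vm_fd, int sev_fd, __u32 id, void *data)
{
        struct kvm_sev_cmd cmd = {
                .id = id,
                .data = (__u64)(unsigned long)data,
                .sev_fd = sev_fd,
        };
        int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

        if (ret)
                fprintf(stderr, "SEV cmd %u: ret=%d, fw_error=%u\n",
                        id, ret, cmd.error);
        return ret;
}

/* Launch flow, in the dispatch order handled by svm_mem_enc_op(). */
static int sev_launch_guest(int vm_fd, int sev_fd, void *mem, __u32 len)
{
        struct kvm_sev_launch_start start = { .policy = 0 };
        struct kvm_sev_launch_update_data update = {
                .uaddr = (__u64)(unsigned long)mem,
                .len = len,
        };

        if (sev_ioctl(vm_fd, sev_fd, KVM_SEV_INIT, NULL) ||           /* sev_guest_init()   */
            sev_ioctl(vm_fd, sev_fd, KVM_SEV_LAUNCH_START, &start) || /* sev_launch_start() */
            sev_ioctl(vm_fd, sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA, &update))
                return -1;

        return sev_ioctl(vm_fd, sev_fd, KVM_SEV_LAUNCH_FINISH, NULL);
}

Note that KVM forwards each command to the PSP driver via sev_issue_cmd(), so when the ioctl fails the firmware's error code is reported back in cmd.error.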
index e32b4956fa0685e77b0f1f3c888cf745c12d2768..05d77b395fc18592643769d3b1291e11896959be 100644
@@ -196,47 +196,6 @@ static u8 rsm_ins_bytes[] = "\x0f\xaa";
 
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
-static int sev_flush_asids(void);
-static DECLARE_RWSEM(sev_deactivate_lock);
-static DEFINE_MUTEX(sev_bitmap_lock);
-static unsigned int max_sev_asid;
-static unsigned int min_sev_asid;
-static unsigned long *sev_asid_bitmap;
-static unsigned long *sev_reclaim_asid_bitmap;
-#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
-
-struct enc_region {
-       struct list_head list;
-       unsigned long npages;
-       struct page **pages;
-       unsigned long uaddr;
-       unsigned long size;
-};
-
-
-static inline bool svm_sev_enabled(void)
-{
-       return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
-}
-
-static inline bool sev_guest(struct kvm *kvm)
-{
-#ifdef CONFIG_KVM_AMD_SEV
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
-       return sev->active;
-#else
-       return false;
-#endif
-}
-
-static inline int sev_get_asid(struct kvm *kvm)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
-       return sev->asid;
-}
-
 static unsigned long iopm_base;
 
 struct kvm_ldttss_desc {
@@ -248,23 +207,7 @@ struct kvm_ldttss_desc {
        u32 zero1;
 } __attribute__((packed));
 
-struct svm_cpu_data {
-       int cpu;
-
-       u64 asid_generation;
-       u32 max_asid;
-       u32 next_asid;
-       u32 min_asid;
-       struct kvm_ldttss_desc *tss_desc;
-
-       struct page *save_area;
-       struct vmcb *current_vmcb;
-
-       /* index = sev_asid, value = vmcb pointer */
-       struct vmcb **sev_vmcbs;
-};
-
-static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
+DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
 
 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
@@ -763,51 +706,6 @@ void disable_nmi_singlestep(struct vcpu_svm *svm)
        }
 }
 
-static __init int sev_hardware_setup(void)
-{
-       struct sev_user_data_status *status;
-       int rc;
-
-       /* Maximum number of encrypted guests supported simultaneously */
-       max_sev_asid = cpuid_ecx(0x8000001F);
-
-       if (!max_sev_asid)
-               return 1;
-
-       /* Minimum ASID value that should be used for SEV guest */
-       min_sev_asid = cpuid_edx(0x8000001F);
-
-       /* Initialize SEV ASID bitmaps */
-       sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
-       if (!sev_asid_bitmap)
-               return 1;
-
-       sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
-       if (!sev_reclaim_asid_bitmap)
-               return 1;
-
-       status = kmalloc(sizeof(*status), GFP_KERNEL);
-       if (!status)
-               return 1;
-
-       /*
-        * Check SEV platform status.
-        *
-        * PLATFORM_STATUS can be called in any state. If we fail to query the
-        * platform status, then either the PSP firmware does not support the
-        * SEV feature or the SEV firmware is dead.
-        */
-       rc = sev_platform_status(status, NULL);
-       if (rc)
-               goto err;
-
-       pr_info("SEV supported\n");
-
-err:
-       kfree(status);
-       return rc;
-}
-
 static void grow_ple_window(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -889,12 +787,8 @@ static void svm_hardware_teardown(void)
 {
        int cpu;
 
-       if (svm_sev_enabled()) {
-               bitmap_free(sev_asid_bitmap);
-               bitmap_free(sev_reclaim_asid_bitmap);
-
-               sev_flush_asids();
-       }
+       if (svm_sev_enabled())
+               sev_hardware_teardown();
 
        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);
@@ -1250,199 +1144,6 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 }
 
-static void sev_asid_free(int asid)
-{
-       struct svm_cpu_data *sd;
-       int cpu, pos;
-
-       mutex_lock(&sev_bitmap_lock);
-
-       pos = asid - 1;
-       __set_bit(pos, sev_reclaim_asid_bitmap);
-
-       for_each_possible_cpu(cpu) {
-               sd = per_cpu(svm_data, cpu);
-               sd->sev_vmcbs[pos] = NULL;
-       }
-
-       mutex_unlock(&sev_bitmap_lock);
-}
-
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
-{
-       struct sev_data_decommission *decommission;
-       struct sev_data_deactivate *data;
-
-       if (!handle)
-               return;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
-       if (!data)
-               return;
-
-       /* deactivate handle */
-       data->handle = handle;
-
-       /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
-       down_read(&sev_deactivate_lock);
-       sev_guest_deactivate(data, NULL);
-       up_read(&sev_deactivate_lock);
-
-       kfree(data);
-
-       decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
-       if (!decommission)
-               return;
-
-       /* decommission handle */
-       decommission->handle = handle;
-       sev_guest_decommission(decommission, NULL);
-
-       kfree(decommission);
-}
-
-static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
-                                   unsigned long ulen, unsigned long *n,
-                                   int write)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       unsigned long npages, npinned, size;
-       unsigned long locked, lock_limit;
-       struct page **pages;
-       unsigned long first, last;
-
-       if (ulen == 0 || uaddr + ulen < uaddr)
-               return NULL;
-
-       /* Calculate number of pages. */
-       first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
-       last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
-       npages = (last - first + 1);
-
-       locked = sev->pages_locked + npages;
-       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-       if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
-               pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
-               return NULL;
-       }
-
-       /* Avoid using vmalloc for smaller buffers. */
-       size = npages * sizeof(struct page *);
-       if (size > PAGE_SIZE)
-               pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
-                                 PAGE_KERNEL);
-       else
-               pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
-
-       if (!pages)
-               return NULL;
-
-       /* Pin the user virtual address. */
-       npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
-       if (npinned != npages) {
-               pr_err("SEV: Failure locking %lu pages.\n", npages);
-               goto err;
-       }
-
-       *n = npages;
-       sev->pages_locked = locked;
-
-       return pages;
-
-err:
-       if (npinned > 0)
-               release_pages(pages, npinned);
-
-       kvfree(pages);
-       return NULL;
-}
-
-static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
-                            unsigned long npages)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
-       release_pages(pages, npages);
-       kvfree(pages);
-       sev->pages_locked -= npages;
-}
-
-static void sev_clflush_pages(struct page *pages[], unsigned long npages)
-{
-       uint8_t *page_virtual;
-       unsigned long i;
-
-       if (npages == 0 || pages == NULL)
-               return;
-
-       for (i = 0; i < npages; i++) {
-               page_virtual = kmap_atomic(pages[i]);
-               clflush_cache_range(page_virtual, PAGE_SIZE);
-               kunmap_atomic(page_virtual);
-       }
-}
-
-static void __unregister_enc_region_locked(struct kvm *kvm,
-                                          struct enc_region *region)
-{
-       sev_unpin_memory(kvm, region->pages, region->npages);
-       list_del(&region->list);
-       kfree(region);
-}
-
-static void sev_vm_destroy(struct kvm *kvm)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct list_head *head = &sev->regions_list;
-       struct list_head *pos, *q;
-
-       if (!sev_guest(kvm))
-               return;
-
-       mutex_lock(&kvm->lock);
-
-       /*
-        * Ensure that all guest tagged cache entries are flushed before
-        * releasing the pages back to the system for use. CLFLUSH will
-        * not do this, so issue a WBINVD.
-        */
-       wbinvd_on_all_cpus();
-
-       /*
-        * If userspace was terminated before unregistering the memory regions,
-        * then let's unpin all the registered memory.
-        */
-       if (!list_empty(head)) {
-               list_for_each_safe(pos, q, head) {
-                       __unregister_enc_region_locked(kvm,
-                               list_entry(pos, struct enc_region, list));
-               }
-       }
-
-       mutex_unlock(&kvm->lock);
-
-       sev_unbind_asid(kvm, sev->handle);
-       sev_asid_free(sev->asid);
-}
-
-static void svm_vm_destroy(struct kvm *kvm)
-{
-       avic_vm_destroy(kvm);
-       sev_vm_destroy(kvm);
-}
-
-static int svm_vm_init(struct kvm *kvm)
-{
-       if (avic) {
-               int ret = avic_vm_init(kvm);
-               if (ret)
-                       return ret;
-       }
-
-       kvm_apicv_init(kvm, avic);
-       return 0;
-}
-
 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3292,30 +2993,6 @@ static void reload_tss(struct kvm_vcpu *vcpu)
        load_TR_desc();
 }
 
-static void pre_sev_run(struct vcpu_svm *svm, int cpu)
-{
-       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
-       int asid = sev_get_asid(svm->vcpu.kvm);
-
-       /* Assign the ASID allocated to this SEV guest */
-       svm->vmcb->control.asid = asid;
-
-       /*
-        * Flush guest TLB:
-        *
-        * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
-        * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
-        */
-       if (sd->sev_vmcbs[asid] == svm->vmcb &&
-           svm->last_cpu == cpu)
-               return;
-
-       svm->last_cpu = cpu;
-       sd->sev_vmcbs[asid] = svm->vmcb;
-       svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
-       mark_dirty(svm->vmcb, VMCB_ASID);
-}
-
 static void pre_svm_run(struct vcpu_svm *svm)
 {
        int cpu = raw_smp_processor_id();
@@ -4216,900 +3893,6 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int sev_flush_asids(void)
-{
-       int ret, error;
-
-       /*
-        * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
-        * so it must be guarded.
-        */
-       down_write(&sev_deactivate_lock);
-
-       wbinvd_on_all_cpus();
-       ret = sev_guest_df_flush(&error);
-
-       up_write(&sev_deactivate_lock);
-
-       if (ret)
-               pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
-
-       return ret;
-}
-
-/* Must be called with the sev_bitmap_lock held */
-static bool __sev_recycle_asids(void)
-{
-       int pos;
-
-       /* Check if there are any ASIDs to reclaim before performing a flush */
-       pos = find_next_bit(sev_reclaim_asid_bitmap,
-                           max_sev_asid, min_sev_asid - 1);
-       if (pos >= max_sev_asid)
-               return false;
-
-       if (sev_flush_asids())
-               return false;
-
-       bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
-                  max_sev_asid);
-       bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
-
-       return true;
-}
-
-static int sev_asid_new(void)
-{
-       bool retry = true;
-       int pos;
-
-       mutex_lock(&sev_bitmap_lock);
-
-       /*
-        * An SEV-enabled guest must use an ASID from min_sev_asid to max_sev_asid.
-        */
-again:
-       pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
-       if (pos >= max_sev_asid) {
-               if (retry && __sev_recycle_asids()) {
-                       retry = false;
-                       goto again;
-               }
-               mutex_unlock(&sev_bitmap_lock);
-               return -EBUSY;
-       }
-
-       __set_bit(pos, sev_asid_bitmap);
-
-       mutex_unlock(&sev_bitmap_lock);
-
-       return pos + 1;
-}
-
-static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       int asid, ret;
-
-       ret = -EBUSY;
-       if (unlikely(sev->active))
-               return ret;
-
-       asid = sev_asid_new();
-       if (asid < 0)
-               return ret;
-
-       ret = sev_platform_init(&argp->error);
-       if (ret)
-               goto e_free;
-
-       sev->active = true;
-       sev->asid = asid;
-       INIT_LIST_HEAD(&sev->regions_list);
-
-       return 0;
-
-e_free:
-       sev_asid_free(asid);
-       return ret;
-}
-
-static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
-{
-       struct sev_data_activate *data;
-       int asid = sev_get_asid(kvm);
-       int ret;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               return -ENOMEM;
-
-       /* activate ASID on the given handle */
-       data->handle = handle;
-       data->asid   = asid;
-       ret = sev_guest_activate(data, error);
-       kfree(data);
-
-       return ret;
-}
-
-static int __sev_issue_cmd(int fd, int id, void *data, int *error)
-{
-       struct fd f;
-       int ret;
-
-       f = fdget(fd);
-       if (!f.file)
-               return -EBADF;
-
-       ret = sev_issue_cmd_external_user(f.file, id, data, error);
-
-       fdput(f);
-       return ret;
-}
-
-static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
-       return __sev_issue_cmd(sev->fd, id, data, error);
-}
-
-static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct sev_data_launch_start *start;
-       struct kvm_sev_launch_start params;
-       void *dh_blob, *session_blob;
-       int *error = &argp->error;
-       int ret;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
-               return -EFAULT;
-
-       start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
-       if (!start)
-               return -ENOMEM;
-
-       dh_blob = NULL;
-       if (params.dh_uaddr) {
-               dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
-               if (IS_ERR(dh_blob)) {
-                       ret = PTR_ERR(dh_blob);
-                       goto e_free;
-               }
-
-               start->dh_cert_address = __sme_set(__pa(dh_blob));
-               start->dh_cert_len = params.dh_len;
-       }
-
-       session_blob = NULL;
-       if (params.session_uaddr) {
-               session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
-               if (IS_ERR(session_blob)) {
-                       ret = PTR_ERR(session_blob);
-                       goto e_free_dh;
-               }
-
-               start->session_address = __sme_set(__pa(session_blob));
-               start->session_len = params.session_len;
-       }
-
-       start->handle = params.handle;
-       start->policy = params.policy;
-
-       /* create memory encryption context */
-       ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
-       if (ret)
-               goto e_free_session;
-
-       /* Bind ASID to this guest */
-       ret = sev_bind_asid(kvm, start->handle, error);
-       if (ret)
-               goto e_free_session;
-
-       /* return handle to userspace */
-       params.handle = start->handle;
-       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
-               sev_unbind_asid(kvm, start->handle);
-               ret = -EFAULT;
-               goto e_free_session;
-       }
-
-       sev->handle = start->handle;
-       sev->fd = argp->sev_fd;
-
-e_free_session:
-       kfree(session_blob);
-e_free_dh:
-       kfree(dh_blob);
-e_free:
-       kfree(start);
-       return ret;
-}
-
-static unsigned long get_num_contig_pages(unsigned long idx,
-                               struct page **inpages, unsigned long npages)
-{
-       unsigned long paddr, next_paddr;
-       unsigned long i = idx + 1, pages = 1;
-
-       /* find the number of contiguous pages starting from idx */
-       paddr = __sme_page_pa(inpages[idx]);
-       while (i < npages) {
-               next_paddr = __sme_page_pa(inpages[i++]);
-               if ((paddr + PAGE_SIZE) == next_paddr) {
-                       pages++;
-                       paddr = next_paddr;
-                       continue;
-               }
-               break;
-       }
-
-       return pages;
-}
-
-static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct kvm_sev_launch_update_data params;
-       struct sev_data_launch_update_data *data;
-       struct page **inpages;
-       int ret;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
-               return -EFAULT;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               return -ENOMEM;
-
-       vaddr = params.uaddr;
-       size = params.len;
-       vaddr_end = vaddr + size;
-
-       /* Lock the user memory. */
-       inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
-       if (!inpages) {
-               ret = -ENOMEM;
-               goto e_free;
-       }
-
-       /*
-        * The LAUNCH_UPDATE command will perform in-place encryption of the
-        * memory content (i.e. it will write the same memory region with C=1).
-        * It's possible that the cache may contain the data with C=0, i.e.,
-        * unencrypted, so invalidate it first.
-        */
-       sev_clflush_pages(inpages, npages);
-
-       for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
-               int offset, len;
-
-               /*
-                * If the user buffer is not page-aligned, calculate the offset
-                * within the page.
-                */
-               offset = vaddr & (PAGE_SIZE - 1);
-
-               /* Calculate the number of pages that can be encrypted in one go. */
-               pages = get_num_contig_pages(i, inpages, npages);
-
-               len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
-
-               data->handle = sev->handle;
-               data->len = len;
-               data->address = __sme_page_pa(inpages[i]) + offset;
-               ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
-               if (ret)
-                       goto e_unpin;
-
-               size -= len;
-               next_vaddr = vaddr + len;
-       }
-
-e_unpin:
-       /* content of memory is updated, mark pages dirty */
-       for (i = 0; i < npages; i++) {
-               set_page_dirty_lock(inpages[i]);
-               mark_page_accessed(inpages[i]);
-       }
-       /* unlock the user pages */
-       sev_unpin_memory(kvm, inpages, npages);
-e_free:
-       kfree(data);
-       return ret;
-}
-
-static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       void __user *measure = (void __user *)(uintptr_t)argp->data;
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct sev_data_launch_measure *data;
-       struct kvm_sev_launch_measure params;
-       void __user *p = NULL;
-       void *blob = NULL;
-       int ret;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       if (copy_from_user(&params, measure, sizeof(params)))
-               return -EFAULT;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               return -ENOMEM;
-
-       /* User wants to query the blob length */
-       if (!params.len)
-               goto cmd;
-
-       p = (void __user *)(uintptr_t)params.uaddr;
-       if (p) {
-               if (params.len > SEV_FW_BLOB_MAX_SIZE) {
-                       ret = -EINVAL;
-                       goto e_free;
-               }
-
-               ret = -ENOMEM;
-               blob = kmalloc(params.len, GFP_KERNEL);
-               if (!blob)
-                       goto e_free;
-
-               data->address = __psp_pa(blob);
-               data->len = params.len;
-       }
-
-cmd:
-       data->handle = sev->handle;
-       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
-
-       /*
-        * If userspace only queried the blob length, the FW has already
-        * responded with the required length in data->len.
-        */
-       if (!params.len)
-               goto done;
-
-       if (ret)
-               goto e_free_blob;
-
-       if (blob) {
-               if (copy_to_user(p, blob, params.len))
-                       ret = -EFAULT;
-       }
-
-done:
-       params.len = data->len;
-       if (copy_to_user(measure, &params, sizeof(params)))
-               ret = -EFAULT;
-e_free_blob:
-       kfree(blob);
-e_free:
-       kfree(data);
-       return ret;
-}
-
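The !params.len path above supports the usual two-call pattern: query the measurement size first, then fetch the blob. A hedged userspace sketch; note the sizing call may itself fail with firmware error SEV_RET_INVALID_LEN while still reporting the required length:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Fetch the launch measurement; the caller frees *blob. Sketch only. */
static int get_measurement(int vm_fd, int sev_fd, void **blob, __u32 *len)
{
        struct kvm_sev_launch_measure m = {};   /* len == 0: size query */
        struct kvm_sev_cmd cmd = {
                .id     = KVM_SEV_LAUNCH_MEASURE,
                .data   = (__u64)(unsigned long)&m,
                .sev_fd = sev_fd,
        };

        /* The first call only asks for the size; m.len is filled in. */
        ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
        if (!m.len)
                return -1;

        *blob = malloc(m.len);
        if (!*blob)
                return -1;

        m.uaddr = (__u64)(unsigned long)*blob;
        if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd)) {
                free(*blob);
                return -1;
        }

        *len = m.len;
        return 0;
}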
-static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct sev_data_launch_finish *data;
-       int ret;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               return -ENOMEM;
-
-       data->handle = sev->handle;
-       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
-
-       kfree(data);
-       return ret;
-}
-
-static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct kvm_sev_guest_status params;
-       struct sev_data_guest_status *data;
-       int ret;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               return -ENOMEM;
-
-       data->handle = sev->handle;
-       ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
-       if (ret)
-               goto e_free;
-
-       params.policy = data->policy;
-       params.state = data->state;
-       params.handle = data->handle;
-
-       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
-               ret = -EFAULT;
-e_free:
-       kfree(data);
-       return ret;
-}
-
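Querying status from userspace follows the same shape; a minimal sketch:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Print the SEV handle, policy and state of a guest; sketch only. */
static int print_guest_status(int vm_fd, int sev_fd)
{
        struct kvm_sev_guest_status status = {};
        struct kvm_sev_cmd cmd = {
                .id     = KVM_SEV_GUEST_STATUS,
                .data   = (__u64)(unsigned long)&status,
                .sev_fd = sev_fd,
        };

        if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
                return -1;

        printf("handle=%u policy=%#x state=%u\n",
               status.handle, status.policy, status.state);
        return 0;
}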
-static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
-                              unsigned long dst, int size,
-                              int *error, bool enc)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct sev_data_dbg *data;
-       int ret;
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               return -ENOMEM;
-
-       data->handle = sev->handle;
-       data->dst_addr = dst;
-       data->src_addr = src;
-       data->len = size;
-
-       ret = sev_issue_cmd(kvm,
-                           enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
-                           data, error);
-       kfree(data);
-       return ret;
-}
-
-static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
-                            unsigned long dst_paddr, int sz, int *err)
-{
-       int offset;
-
-       /*
-        * It's safe to read more than asked for; the caller should ensure
-        * that the destination has enough space.
-        */
-       offset = src_paddr & 15;
-       src_paddr = round_down(src_paddr, 16);
-       sz = round_up(sz + offset, 16);
-
-       return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
-}
-
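A quick standalone check of the rounding arithmetic above (the input values are made up): src_paddr = 0x1009 with sz = 20 yields offset 9, an aligned base of 0x1000, and a widened length of round_up(29, 16) == 32.

#include <stdio.h>

#define round_down(x, a) ((x) & ~((unsigned long)(a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, (a))

int main(void)
{
        unsigned long src_paddr = 0x1009, sz = 20;      /* hypothetical */
        unsigned long offset;

        offset    = src_paddr & 15;                     /* 9 */
        src_paddr = round_down(src_paddr, 16);          /* 0x1000 */
        sz        = round_up(sz + offset, 16);          /* 32 */

        printf("src=%#lx offset=%lu sz=%lu\n", src_paddr, offset, sz);
        return 0;
}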
-static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
-                                 unsigned long __user dst_uaddr,
-                                 unsigned long dst_paddr,
-                                 int size, int *err)
-{
-       struct page *tpage = NULL;
-       int ret, offset;
-
-       /* if inputs are not 16-byte aligned then use an intermediate buffer */
-       if (!IS_ALIGNED(dst_paddr, 16) ||
-           !IS_ALIGNED(paddr,     16) ||
-           !IS_ALIGNED(size,      16)) {
-               tpage = (void *)alloc_page(GFP_KERNEL);
-               if (!tpage)
-                       return -ENOMEM;
-
-               dst_paddr = __sme_page_pa(tpage);
-       }
-
-       ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
-       if (ret)
-               goto e_free;
-
-       if (tpage) {
-               offset = paddr & 15;
-               if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
-                                page_address(tpage) + offset, size))
-                       ret = -EFAULT;
-       }
-
-e_free:
-       if (tpage)
-               __free_page(tpage);
-
-       return ret;
-}
-
-static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
-                                 unsigned long __user vaddr,
-                                 unsigned long dst_paddr,
-                                 unsigned long __user dst_vaddr,
-                                 int size, int *error)
-{
-       struct page *src_tpage = NULL;
-       struct page *dst_tpage = NULL;
-       int ret, len = size;
-
-       /* If the source buffer is not 16-byte aligned then use an intermediate buffer */
-       if (!IS_ALIGNED(vaddr, 16)) {
-               src_tpage = alloc_page(GFP_KERNEL);
-               if (!src_tpage)
-                       return -ENOMEM;
-
-               if (copy_from_user(page_address(src_tpage),
-                               (void __user *)(uintptr_t)vaddr, size)) {
-                       __free_page(src_tpage);
-                       return -EFAULT;
-               }
-
-               paddr = __sme_page_pa(src_tpage);
-       }
-
-       /*
-        * If the destination buffer or the length is not 16-byte aligned then
-        * do a read-modify-write:
-        *  - decrypt the destination into an intermediate buffer
-        *  - copy the source buffer into the intermediate buffer
-        *  - use the intermediate buffer as the source buffer
-        */
-       if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
-               int dst_offset;
-
-               dst_tpage = alloc_page(GFP_KERNEL);
-               if (!dst_tpage) {
-                       ret = -ENOMEM;
-                       goto e_free;
-               }
-
-               ret = __sev_dbg_decrypt(kvm, dst_paddr,
-                                       __sme_page_pa(dst_tpage), size, error);
-               if (ret)
-                       goto e_free;
-
-               /*
-                * If the source is a kernel buffer then use memcpy(),
-                * otherwise copy_from_user().
-                */
-               dst_offset = dst_paddr & 15;
-
-               if (src_tpage)
-                       memcpy(page_address(dst_tpage) + dst_offset,
-                              page_address(src_tpage), size);
-               else {
-                       if (copy_from_user(page_address(dst_tpage) + dst_offset,
-                                          (void __user *)(uintptr_t)vaddr, size)) {
-                               ret = -EFAULT;
-                               goto e_free;
-                       }
-               }
-
-               paddr = __sme_page_pa(dst_tpage);
-               dst_paddr = round_down(dst_paddr, 16);
-               len = round_up(size, 16);
-       }
-
-       ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
-
-e_free:
-       if (src_tpage)
-               __free_page(src_tpage);
-       if (dst_tpage)
-               __free_page(dst_tpage);
-       return ret;
-}
-
-static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
-{
-       unsigned long vaddr, vaddr_end, next_vaddr;
-       unsigned long dst_vaddr;
-       struct page **src_p, **dst_p;
-       struct kvm_sev_dbg debug;
-       unsigned long n;
-       unsigned int size;
-       int ret;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
-               return -EFAULT;
-
-       if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
-               return -EINVAL;
-       if (!debug.dst_uaddr)
-               return -EINVAL;
-
-       vaddr = debug.src_uaddr;
-       size = debug.len;
-       vaddr_end = vaddr + size;
-       dst_vaddr = debug.dst_uaddr;
-
-       for (; vaddr < vaddr_end; vaddr = next_vaddr) {
-               int len, s_off, d_off;
-
-               /* lock userspace source and destination page */
-               src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
-               if (!src_p)
-                       return -EFAULT;
-
-               dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
-               if (!dst_p) {
-                       sev_unpin_memory(kvm, src_p, n);
-                       return -EFAULT;
-               }
-
-               /*
-                * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
-                * memory content (i.e. they will write the same memory region
-                * with C=1). It's possible that the cache may contain the data
-                * with C=0, i.e., unencrypted, so invalidate it first.
-                */
-               sev_clflush_pages(src_p, 1);
-               sev_clflush_pages(dst_p, 1);
-
-               /*
-                * Since the user buffers may not be page-aligned, calculate
-                * the offsets within their pages.
-                */
-               s_off = vaddr & ~PAGE_MASK;
-               d_off = dst_vaddr & ~PAGE_MASK;
-               len = min_t(size_t, (PAGE_SIZE - s_off), size);
-
-               if (dec)
-                       ret = __sev_dbg_decrypt_user(kvm,
-                                                    __sme_page_pa(src_p[0]) + s_off,
-                                                    dst_vaddr,
-                                                    __sme_page_pa(dst_p[0]) + d_off,
-                                                    len, &argp->error);
-               else
-                       ret = __sev_dbg_encrypt_user(kvm,
-                                                    __sme_page_pa(src_p[0]) + s_off,
-                                                    vaddr,
-                                                    __sme_page_pa(dst_p[0]) + d_off,
-                                                    dst_vaddr,
-                                                    len, &argp->error);
-
-               sev_unpin_memory(kvm, src_p, n);
-               sev_unpin_memory(kvm, dst_p, n);
-
-               if (ret)
-                       goto err;
-
-               next_vaddr = vaddr + len;
-               dst_vaddr = dst_vaddr + len;
-               size -= len;
-       }
-err:
-       return ret;
-}
-
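Userspace drives this loop through KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT; the firmware refuses both if the guest policy forbids debugging. A minimal decrypt sketch:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Copy len bytes of guest memory, decrypted, into dst. Sketch only. */
static int dbg_decrypt(int vm_fd, int sev_fd, __u64 src_uaddr,
                       void *dst, __u32 len)
{
        struct kvm_sev_dbg dbg = {
                .src_uaddr = src_uaddr,
                .dst_uaddr = (__u64)(unsigned long)dst,
                .len       = len,
        };
        struct kvm_sev_cmd cmd = {
                .id     = KVM_SEV_DBG_DECRYPT,
                .data   = (__u64)(unsigned long)&dbg,
                .sev_fd = sev_fd,
        };

        return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}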
-static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct sev_data_launch_secret *data;
-       struct kvm_sev_launch_secret params;
-       struct page **pages;
-       void *blob, *hdr;
-       unsigned long n;
-       int ret, offset;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
-               return -EFAULT;
-
-       pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
-       if (!pages)
-               return -ENOMEM;
-
-       /*
-        * The secret must be copied into a contiguous memory region, so verify
-        * that the userspace memory pages are contiguous before issuing the
-        * command.
-        */
-       if (get_num_contig_pages(0, pages, n) != n) {
-               ret = -EINVAL;
-               goto e_unpin_memory;
-       }
-
-       ret = -ENOMEM;
-       data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
-       if (!data)
-               goto e_unpin_memory;
-
-       offset = params.guest_uaddr & (PAGE_SIZE - 1);
-       data->guest_address = __sme_page_pa(pages[0]) + offset;
-       data->guest_len = params.guest_len;
-
-       blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
-       if (IS_ERR(blob)) {
-               ret = PTR_ERR(blob);
-               goto e_free;
-       }
-
-       data->trans_address = __psp_pa(blob);
-       data->trans_len = params.trans_len;
-
-       hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
-       if (IS_ERR(hdr)) {
-               ret = PTR_ERR(hdr);
-               goto e_free_blob;
-       }
-       data->hdr_address = __psp_pa(hdr);
-       data->hdr_len = params.hdr_len;
-
-       data->handle = sev->handle;
-       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
-
-       kfree(hdr);
-
-e_free_blob:
-       kfree(blob);
-e_free:
-       kfree(data);
-e_unpin_memory:
-       sev_unpin_memory(kvm, pages, n);
-       return ret;
-}
-
-static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
-{
-       struct kvm_sev_cmd sev_cmd;
-       int r;
-
-       if (!svm_sev_enabled())
-               return -ENOTTY;
-
-       if (!argp)
-               return 0;
-
-       if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
-               return -EFAULT;
-
-       mutex_lock(&kvm->lock);
-
-       switch (sev_cmd.id) {
-       case KVM_SEV_INIT:
-               r = sev_guest_init(kvm, &sev_cmd);
-               break;
-       case KVM_SEV_LAUNCH_START:
-               r = sev_launch_start(kvm, &sev_cmd);
-               break;
-       case KVM_SEV_LAUNCH_UPDATE_DATA:
-               r = sev_launch_update_data(kvm, &sev_cmd);
-               break;
-       case KVM_SEV_LAUNCH_MEASURE:
-               r = sev_launch_measure(kvm, &sev_cmd);
-               break;
-       case KVM_SEV_LAUNCH_FINISH:
-               r = sev_launch_finish(kvm, &sev_cmd);
-               break;
-       case KVM_SEV_GUEST_STATUS:
-               r = sev_guest_status(kvm, &sev_cmd);
-               break;
-       case KVM_SEV_DBG_DECRYPT:
-               r = sev_dbg_crypt(kvm, &sev_cmd, true);
-               break;
-       case KVM_SEV_DBG_ENCRYPT:
-               r = sev_dbg_crypt(kvm, &sev_cmd, false);
-               break;
-       case KVM_SEV_LAUNCH_SECRET:
-               r = sev_launch_secret(kvm, &sev_cmd);
-               break;
-       default:
-               r = -EINVAL;
-               goto out;
-       }
-
-       if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
-               r = -EFAULT;
-
-out:
-       mutex_unlock(&kvm->lock);
-       return r;
-}
-
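Since every KVM_SEV_* command funnels through this dispatcher, VMMs typically wrap the ioctl once (QEMU carries a similar helper); the name sev_vm_ioctl here is ours:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Issue any KVM_SEV_* command; the SEV firmware error code, if any,
 * is returned through *fw_error. Sketch only. */
static int sev_vm_ioctl(int vm_fd, int sev_fd, __u32 id, void *data,
                        __u32 *fw_error)
{
        struct kvm_sev_cmd cmd = {
                .id     = id,
                .data   = (__u64)(unsigned long)data,
                .sev_fd = sev_fd,
        };
        int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

        if (fw_error)
                *fw_error = cmd.error;
        return ret;
}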
-static int svm_register_enc_region(struct kvm *kvm,
-                                  struct kvm_enc_region *range)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct enc_region *region;
-       int ret = 0;
-
-       if (!sev_guest(kvm))
-               return -ENOTTY;
-
-       if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
-               return -EINVAL;
-
-       region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
-       if (!region)
-               return -ENOMEM;
-
-       region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
-       if (!region->pages) {
-               ret = -ENOMEM;
-               goto e_free;
-       }
-
-       /*
-        * The guest may change the memory encryption attribute from C=0 -> C=1
-        * or vice versa for this memory range. Make sure caches are flushed so
-        * that guest data gets written into memory with the correct C-bit.
-        */
-       sev_clflush_pages(region->pages, region->npages);
-
-       region->uaddr = range->addr;
-       region->size = range->size;
-
-       mutex_lock(&kvm->lock);
-       list_add_tail(&region->list, &sev->regions_list);
-       mutex_unlock(&kvm->lock);
-
-       return ret;
-
-e_free:
-       kfree(region);
-       return ret;
-}
-
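The matching userspace call is KVM_MEMORY_ENCRYPT_REG_REGION (with KVM_MEMORY_ENCRYPT_UNREG_REGION as its inverse); unlike the commands above it takes a struct kvm_enc_region directly:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Register a guest RAM range so its pages stay pinned; sketch only. */
static int register_enc_region(int vm_fd, void *addr, __u64 size)
{
        struct kvm_enc_region range = {
                .addr = (__u64)(unsigned long)addr,
                .size = size,
        };

        return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
}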
-static struct enc_region *
-find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
-{
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       struct list_head *head = &sev->regions_list;
-       struct enc_region *i;
-
-       list_for_each_entry(i, head, list) {
-               if (i->uaddr == range->addr &&
-                   i->size == range->size)
-                       return i;
-       }
-
-       return NULL;
-}
-
-static int svm_unregister_enc_region(struct kvm *kvm,
-                                    struct kvm_enc_region *range)
-{
-       struct enc_region *region;
-       int ret;
-
-       mutex_lock(&kvm->lock);
-
-       if (!sev_guest(kvm)) {
-               ret = -ENOTTY;
-               goto failed;
-       }
-
-       region = find_enc_region(kvm, range);
-       if (!region) {
-               ret = -EINVAL;
-               goto failed;
-       }
-
-       /*
-        * Ensure that all guest tagged cache entries are flushed before
-        * releasing the pages back to the system for use. CLFLUSH will
-        * not do this, so issue a WBINVD.
-        */
-       wbinvd_on_all_cpus();
-
-       __unregister_enc_region_locked(kvm, region);
-
-       mutex_unlock(&kvm->lock);
-       return 0;
-
-failed:
-       mutex_unlock(&kvm->lock);
-       return ret;
-}
-
 static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
 {
        unsigned long cr4 = kvm_read_cr4(vcpu);
@@ -5183,6 +3966,24 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
                   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
 }
 
+static void svm_vm_destroy(struct kvm *kvm)
+{
+       avic_vm_destroy(kvm);
+       sev_vm_destroy(kvm);
+}
+
+static int svm_vm_init(struct kvm *kvm)
+{
+       if (avic) {
+               int ret = avic_vm_init(kvm);
+               if (ret)
+                       return ret;
+       }
+
+       kvm_apicv_init(kvm, avic);
+       return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
        .hardware_unsetup = svm_hardware_teardown,
        .hardware_enable = svm_hardware_enable,
index c7abc1fede9703479c51d082e273f72d56ff4545..df3474f4fb022578a29c0d5954048f5f66d0950e 100644 (file)
@@ -171,6 +171,24 @@ struct vcpu_svm {
        unsigned int last_cpu;
 };
 
+struct svm_cpu_data {
+       int cpu;
+
+       u64 asid_generation;
+       u32 max_asid;
+       u32 next_asid;
+       u32 min_asid;
+       struct kvm_ldttss_desc *tss_desc;
+
+       struct page *save_area;
+       struct vmcb *current_vmcb;
+
+       /* index = sev_asid, value = vmcb pointer */
+       struct vmcb **sev_vmcbs;
+};
+
+DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);
+
 void recalc_intercepts(struct vcpu_svm *svm);
 
 static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
@@ -440,4 +458,34 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
 void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
 
+/* sev.c */
+
+extern unsigned int max_sev_asid;
+
+static inline bool sev_guest(struct kvm *kvm)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       return sev->active;
+#else
+       return false;
+#endif
+}
+
+static inline bool svm_sev_enabled(void)
+{
+       return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
+}
+
+void sev_vm_destroy(struct kvm *kvm);
+int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
+int svm_register_enc_region(struct kvm *kvm,
+                           struct kvm_enc_region *range);
+int svm_unregister_enc_region(struct kvm *kvm,
+                             struct kvm_enc_region *range);
+void pre_sev_run(struct vcpu_svm *svm, int cpu);
+int __init sev_hardware_setup(void);
+void sev_hardware_teardown(void);
+
 #endif