1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/misc_cgroup.h>
18 #include <linux/processor.h>
19 #include <linux/trace_events.h>
20 #include <asm/fpu/internal.h>
23 #include <asm/trapnr.h>
31 #ifndef CONFIG_KVM_AMD_SEV
33 * When this config is not defined, SEV feature is not supported and APIs in
34 * this file are not used but this file still gets compiled into the KVM AMD
37 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
38 * misc_res_type {} defined in linux/misc_cgroup.h.
40 * The macros below allow compilation to succeed.
42 #define MISC_CG_RES_SEV MISC_CG_RES_TYPES
43 #define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
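/*
 * With these placeholder values, the misc cgroup charge/uncharge calls
 * later in this file are expected to become no-ops when SEV support is
 * compiled out, since MISC_CG_RES_TYPES is not a valid resource type.
 */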
46 #ifdef CONFIG_KVM_AMD_SEV
47 /* enable/disable SEV support */
48 static bool sev_enabled = true;
49 module_param_named(sev, sev_enabled, bool, 0444);
51 /* enable/disable SEV-ES support */
52 static bool sev_es_enabled = true;
53 module_param_named(sev_es, sev_es_enabled, bool, 0444);
55 #define sev_enabled false
56 #define sev_es_enabled false
57 #endif /* CONFIG_KVM_AMD_SEV */
59 static u8 sev_enc_bit;
60 static DECLARE_RWSEM(sev_deactivate_lock);
61 static DEFINE_MUTEX(sev_bitmap_lock);
62 unsigned int max_sev_asid;
63 static unsigned int min_sev_asid;
64 static unsigned long sev_me_mask;
65 static unsigned int nr_asids;
66 static unsigned long *sev_asid_bitmap;
67 static unsigned long *sev_reclaim_asid_bitmap;
70 struct list_head list;
77 /* Called with the sev_bitmap_lock held, or on shutdown */
78 static int sev_flush_asids(int min_asid, int max_asid)
80 int ret, asid, error = 0;
82 /* Check if there are any ASIDs to reclaim before performing a flush */
83 asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
88 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
89 * so it must be guarded.
91 down_write(&sev_deactivate_lock);
94 ret = sev_guest_df_flush(&error);
96 up_write(&sev_deactivate_lock);
99 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
104 static inline bool is_mirroring_enc_context(struct kvm *kvm)
106 return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
109 /* Must be called with the sev_bitmap_lock held */
110 static bool __sev_recycle_asids(int min_asid, int max_asid)
112 if (sev_flush_asids(min_asid, max_asid))
115 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
116 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
118 bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
123 static int sev_asid_new(struct kvm_sev_info *sev)
125 int asid, min_asid, max_asid, ret;
127 enum misc_res_type type;
129 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
130 WARN_ON(sev->misc_cg);
131 sev->misc_cg = get_current_misc_cg();
132 ret = misc_cg_try_charge(type, sev->misc_cg, 1);
134 put_misc_cg(sev->misc_cg);
139 mutex_lock(&sev_bitmap_lock);
142 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
143 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
145 min_asid = sev->es_active ? 1 : min_sev_asid;
146 max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
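/*
 * Illustrative example (hypothetical firmware values): with
 * min_sev_asid = 100 and max_sev_asid = 509, SEV-ES guests are assigned
 * ASIDs 1..99 while plain SEV guests use ASIDs 100..509.
 */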
148 asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
149 if (asid > max_asid) {
150 if (retry && __sev_recycle_asids(min_asid, max_asid)) {
154 mutex_unlock(&sev_bitmap_lock);
159 __set_bit(asid, sev_asid_bitmap);
161 mutex_unlock(&sev_bitmap_lock);
165 misc_cg_uncharge(type, sev->misc_cg, 1);
166 put_misc_cg(sev->misc_cg);
171 static int sev_get_asid(struct kvm *kvm)
173 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
178 static void sev_asid_free(struct kvm_sev_info *sev)
180 struct svm_cpu_data *sd;
182 enum misc_res_type type;
184 mutex_lock(&sev_bitmap_lock);
186 __set_bit(sev->asid, sev_reclaim_asid_bitmap);
188 for_each_possible_cpu(cpu) {
189 sd = per_cpu(svm_data, cpu);
190 sd->sev_vmcbs[sev->asid] = NULL;
193 mutex_unlock(&sev_bitmap_lock);
195 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
196 misc_cg_uncharge(type, sev->misc_cg, 1);
197 put_misc_cg(sev->misc_cg);
201 static void sev_decommission(unsigned int handle)
203 struct sev_data_decommission decommission;
208 decommission.handle = handle;
209 sev_guest_decommission(&decommission, NULL);
212 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
214 struct sev_data_deactivate deactivate;
219 deactivate.handle = handle;
221 /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
222 down_read(&sev_deactivate_lock);
223 sev_guest_deactivate(&deactivate, NULL);
224 up_read(&sev_deactivate_lock);
226 sev_decommission(handle);
229 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
231 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
232 bool es_active = argp->id == KVM_SEV_ES_INIT;
235 if (kvm->created_vcpus)
239 if (unlikely(sev->active))
242 sev->es_active = es_active;
243 asid = sev_asid_new(sev);
248 ret = sev_platform_init(&argp->error);
254 INIT_LIST_HEAD(&sev->regions_list);
262 sev->es_active = false;
266 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
268 struct sev_data_activate activate;
269 int asid = sev_get_asid(kvm);
272 /* activate ASID on the given handle */
273 activate.handle = handle;
274 activate.asid = asid;
275 ret = sev_guest_activate(&activate, error);
280 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
289 ret = sev_issue_cmd_external_user(f.file, id, data, error);
295 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
297 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
299 return __sev_issue_cmd(sev->fd, id, data, error);
302 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
304 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
305 struct sev_data_launch_start start;
306 struct kvm_sev_launch_start params;
307 void *dh_blob, *session_blob;
308 int *error = &argp->error;
314 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
317 memset(&start, 0, sizeof(start));
320 if (params.dh_uaddr) {
321 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
323 return PTR_ERR(dh_blob);
325 start.dh_cert_address = __sme_set(__pa(dh_blob));
326 start.dh_cert_len = params.dh_len;
330 if (params.session_uaddr) {
331 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
332 if (IS_ERR(session_blob)) {
333 ret = PTR_ERR(session_blob);
337 start.session_address = __sme_set(__pa(session_blob));
338 start.session_len = params.session_len;
341 start.handle = params.handle;
342 start.policy = params.policy;
344 /* create memory encryption context */
345 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
349 /* Bind ASID to this guest */
350 ret = sev_bind_asid(kvm, start.handle, error);
352 sev_decommission(start.handle);
356 /* return handle to userspace */
357 params.handle = start.handle;
358 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
359 sev_unbind_asid(kvm, start.handle);
364 sev->handle = start.handle;
365 sev->fd = argp->sev_fd;
374 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
375 unsigned long ulen, unsigned long *n,
378 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
379 unsigned long npages, size;
381 unsigned long locked, lock_limit;
383 unsigned long first, last;
386 lockdep_assert_held(&kvm->lock);
388 if (ulen == 0 || uaddr + ulen < uaddr)
389 return ERR_PTR(-EINVAL);
391 /* Calculate number of pages. */
392 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
393 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
394 npages = (last - first + 1);
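/*
 * Worked example (illustrative): with 4K pages, uaddr = 0x10800 and
 * ulen = 0x1000 give first = 0x10 and last = 0x11, so npages = 2 even
 * though ulen is exactly one page, because the range straddles a page
 * boundary.
 */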
396 locked = sev->pages_locked + npages;
397 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
398 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
399 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
400 return ERR_PTR(-ENOMEM);
403 if (WARN_ON_ONCE(npages > INT_MAX))
404 return ERR_PTR(-EINVAL);
406 /* Avoid using vmalloc for smaller buffers. */
407 size = npages * sizeof(struct page *);
408 if (size > PAGE_SIZE)
409 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
411 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
414 return ERR_PTR(-ENOMEM);
416 /* Pin the user virtual address. */
417 npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
418 if (npinned != npages) {
419 pr_err("SEV: Failure locking %lu pages.\n", npages);
425 sev->pages_locked = locked;
431 unpin_user_pages(pages, npinned);
437 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
438 unsigned long npages)
440 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
442 unpin_user_pages(pages, npages);
444 sev->pages_locked -= npages;
447 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
449 uint8_t *page_virtual;
452 if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
456 for (i = 0; i < npages; i++) {
457 page_virtual = kmap_atomic(pages[i]);
458 clflush_cache_range(page_virtual, PAGE_SIZE);
459 kunmap_atomic(page_virtual);
463 static unsigned long get_num_contig_pages(unsigned long idx,
464 struct page **inpages, unsigned long npages)
466 unsigned long paddr, next_paddr;
467 unsigned long i = idx + 1, pages = 1;
469 /* find the number of contiguous pages starting from idx */
470 paddr = __sme_page_pa(inpages[idx]);
472 next_paddr = __sme_page_pa(inpages[i++]);
473 if ((paddr + PAGE_SIZE) == next_paddr) {
484 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
486 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
487 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
488 struct kvm_sev_launch_update_data params;
489 struct sev_data_launch_update_data data;
490 struct page **inpages;
496 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
499 vaddr = params.uaddr;
501 vaddr_end = vaddr + size;
503 /* Lock the user memory. */
504 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
506 return PTR_ERR(inpages);
509 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
510 * place; the cache may contain the data that was written unencrypted.
512 sev_clflush_pages(inpages, npages);
515 data.handle = sev->handle;
517 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
521 * If the user buffer is not page-aligned, calculate the offset
524 offset = vaddr & (PAGE_SIZE - 1);
526 /* Calculate the number of pages that can be encrypted in one go. */
527 pages = get_num_contig_pages(i, inpages, npages);
529 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
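/*
 * Illustrative example: with offset = 0x800 and a single contiguous page,
 * len = min(PAGE_SIZE - 0x800, size), so at most 0x800 bytes are encrypted
 * in this iteration and the remainder is handled by later iterations.
 */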
532 data.address = __sme_page_pa(inpages[i]) + offset;
533 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
538 next_vaddr = vaddr + len;
542 /* content of memory is updated, mark pages dirty */
543 for (i = 0; i < npages; i++) {
544 set_page_dirty_lock(inpages[i]);
545 mark_page_accessed(inpages[i]);
547 /* unlock the user pages */
548 sev_unpin_memory(kvm, inpages, npages);
552 static int sev_es_sync_vmsa(struct vcpu_svm *svm)
554 struct vmcb_save_area *save = &svm->vmcb->save;
556 /* Check some debug-related fields before encrypting the VMSA */
557 if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
560 /* Sync registers */
561 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
562 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
563 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
564 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
565 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
566 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
567 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
568 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
570 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
571 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
572 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
573 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
574 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
575 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
576 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
577 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
579 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
581 /* Sync some non-GPR registers before encrypting */
582 save->xcr0 = svm->vcpu.arch.xcr0;
583 save->pkru = svm->vcpu.arch.pkru;
584 save->xss = svm->vcpu.arch.ia32_xss;
585 save->dr6 = svm->vcpu.arch.dr6;
588 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
589 * the traditional VMSA that is part of the VMCB. Copy the
590 * traditional VMSA as it has been built so far (in prep
591 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
593 memcpy(svm->vmsa, save, sizeof(*save));
598 static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
601 struct sev_data_launch_update_vmsa vmsa;
602 struct vcpu_svm *svm = to_svm(vcpu);
605 /* Perform some pre-encryption checks against the VMSA */
606 ret = sev_es_sync_vmsa(svm);
611 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
612 * the VMSA memory content (i.e. it will write the same memory region
613 * with the guest's key), so invalidate it first.
615 clflush_cache_range(svm->vmsa, PAGE_SIZE);
618 vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
619 vmsa.address = __sme_pa(svm->vmsa);
620 vmsa.len = PAGE_SIZE;
621 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
624 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
626 struct kvm_vcpu *vcpu;
629 if (!sev_es_guest(kvm))
632 kvm_for_each_vcpu(i, vcpu, kvm) {
633 ret = mutex_lock_killable(&vcpu->mutex);
637 ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
639 mutex_unlock(&vcpu->mutex);
647 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
649 void __user *measure = (void __user *)(uintptr_t)argp->data;
650 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
651 struct sev_data_launch_measure data;
652 struct kvm_sev_launch_measure params;
653 void __user *p = NULL;
660 if (copy_from_user(&params, measure, sizeof(params)))
663 memset(&data, 0, sizeof(data));
665 /* User wants to query the blob length */
669 p = (void __user *)(uintptr_t)params.uaddr;
671 if (params.len > SEV_FW_BLOB_MAX_SIZE)
674 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
678 data.address = __psp_pa(blob);
679 data.len = params.len;
683 data.handle = sev->handle;
684 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
687 * If userspace only queried the measurement length, the FW responded with the expected data.
696 if (copy_to_user(p, blob, params.len))
701 params.len = data.len;
702 if (copy_to_user(measure, &params, sizeof(params)))
709 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
711 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
712 struct sev_data_launch_finish data;
717 data.handle = sev->handle;
718 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
721 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
723 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
724 struct kvm_sev_guest_status params;
725 struct sev_data_guest_status data;
731 memset(&data, 0, sizeof(data));
733 data.handle = sev->handle;
734 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
738 params.policy = data.policy;
739 params.state = data.state;
740 params.handle = data.handle;
742 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
748 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
749 unsigned long dst, int size,
750 int *error, bool enc)
752 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
753 struct sev_data_dbg data;
756 data.handle = sev->handle;
761 return sev_issue_cmd(kvm,
762 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
766 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
767 unsigned long dst_paddr, int sz, int *err)
772 * It's safe to read more than we were asked for; the caller should ensure
773 * that the destination has enough space.
775 offset = src_paddr & 15;
776 src_paddr = round_down(src_paddr, 16);
777 sz = round_up(sz + offset, 16);
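/*
 * Worked example (illustrative): src_paddr = 0x1008 and sz = 0x20 give
 * offset = 8; src_paddr is rounded down to 0x1000 and sz becomes
 * round_up(0x28, 16) = 0x30, so the full 16-byte-aligned span covering
 * the requested bytes is decrypted.
 */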
779 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
782 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
783 void __user *dst_uaddr,
784 unsigned long dst_paddr,
787 struct page *tpage = NULL;
790 /* if inputs are not 16-byte aligned then use an intermediate buffer */
791 if (!IS_ALIGNED(dst_paddr, 16) ||
792 !IS_ALIGNED(paddr, 16) ||
793 !IS_ALIGNED(size, 16)) {
794 tpage = (void *)alloc_page(GFP_KERNEL);
798 dst_paddr = __sme_page_pa(tpage);
801 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
807 if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
818 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
820 unsigned long dst_paddr,
821 void __user *dst_vaddr,
822 int size, int *error)
824 struct page *src_tpage = NULL;
825 struct page *dst_tpage = NULL;
828 /* If source buffer is not aligned then use an intermediate buffer */
829 if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
830 src_tpage = alloc_page(GFP_KERNEL);
834 if (copy_from_user(page_address(src_tpage), vaddr, size)) {
835 __free_page(src_tpage);
839 paddr = __sme_page_pa(src_tpage);
843 * If destination buffer or length is not aligned then do read-modify-write:
844 * - decrypt destination in an intermediate buffer
845 * - copy the source buffer in an intermediate buffer
846 * - use the intermediate buffer as source buffer
848 if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
851 dst_tpage = alloc_page(GFP_KERNEL);
857 ret = __sev_dbg_decrypt(kvm, dst_paddr,
858 __sme_page_pa(dst_tpage), size, error);
863 * If source is kernel buffer then use memcpy() otherwise
866 dst_offset = dst_paddr & 15;
869 memcpy(page_address(dst_tpage) + dst_offset,
870 page_address(src_tpage), size);
872 if (copy_from_user(page_address(dst_tpage) + dst_offset,
879 paddr = __sme_page_pa(dst_tpage);
880 dst_paddr = round_down(dst_paddr, 16);
881 len = round_up(size, 16);
884 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
888 __free_page(src_tpage);
890 __free_page(dst_tpage);
894 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
896 unsigned long vaddr, vaddr_end, next_vaddr;
897 unsigned long dst_vaddr;
898 struct page **src_p, **dst_p;
899 struct kvm_sev_dbg debug;
907 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
910 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
912 if (!debug.dst_uaddr)
915 vaddr = debug.src_uaddr;
917 vaddr_end = vaddr + size;
918 dst_vaddr = debug.dst_uaddr;
920 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
921 int len, s_off, d_off;
923 /* lock userspace source and destination page */
924 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
926 return PTR_ERR(src_p);
928 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
930 sev_unpin_memory(kvm, src_p, n);
931 return PTR_ERR(dst_p);
935 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
936 * the pages; flush the destination too so that future accesses do not
939 sev_clflush_pages(src_p, 1);
940 sev_clflush_pages(dst_p, 1);
943 * Since user buffer may not be page aligned, calculate the
944 * offset within the page.
946 s_off = vaddr & ~PAGE_MASK;
947 d_off = dst_vaddr & ~PAGE_MASK;
948 len = min_t(size_t, (PAGE_SIZE - s_off), size);
951 ret = __sev_dbg_decrypt_user(kvm,
952 __sme_page_pa(src_p[0]) + s_off,
953 (void __user *)dst_vaddr,
954 __sme_page_pa(dst_p[0]) + d_off,
957 ret = __sev_dbg_encrypt_user(kvm,
958 __sme_page_pa(src_p[0]) + s_off,
959 (void __user *)vaddr,
960 __sme_page_pa(dst_p[0]) + d_off,
961 (void __user *)dst_vaddr,
964 sev_unpin_memory(kvm, src_p, n);
965 sev_unpin_memory(kvm, dst_p, n);
970 next_vaddr = vaddr + len;
971 dst_vaddr = dst_vaddr + len;
978 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
980 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
981 struct sev_data_launch_secret data;
982 struct kvm_sev_launch_secret params;
991 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
994 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
996 return PTR_ERR(pages);
999 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
1000 * place; the cache may contain the data that was written unencrypted.
1002 sev_clflush_pages(pages, n);
1005 * The secret must be copied into a contiguous memory region; let's verify
1006 * that the userspace memory pages are contiguous before we issue the command.
1008 if (get_num_contig_pages(0, pages, n) != n) {
1010 goto e_unpin_memory;
1013 memset(&data, 0, sizeof(data));
1015 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1016 data.guest_address = __sme_page_pa(pages[0]) + offset;
1017 data.guest_len = params.guest_len;
1019 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1021 ret = PTR_ERR(blob);
1022 goto e_unpin_memory;
1025 data.trans_address = __psp_pa(blob);
1026 data.trans_len = params.trans_len;
1028 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1033 data.hdr_address = __psp_pa(hdr);
1034 data.hdr_len = params.hdr_len;
1036 data.handle = sev->handle;
1037 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
1044 /* content of memory is updated, mark pages dirty */
1045 for (i = 0; i < n; i++) {
1046 set_page_dirty_lock(pages[i]);
1047 mark_page_accessed(pages[i]);
1049 sev_unpin_memory(kvm, pages, n);
1053 static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1055 void __user *report = (void __user *)(uintptr_t)argp->data;
1056 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1057 struct sev_data_attestation_report data;
1058 struct kvm_sev_attestation_report params;
1063 if (!sev_guest(kvm))
1066 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1069 memset(&data, 0, sizeof(data));
1071 /* User wants to query the blob length */
1075 p = (void __user *)(uintptr_t)params.uaddr;
1077 if (params.len > SEV_FW_BLOB_MAX_SIZE)
1080 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
1084 data.address = __psp_pa(blob);
1085 data.len = params.len;
1086 memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
1089 data.handle = sev->handle;
1090 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
1092 * If userspace only queried the report length, the FW responded with the expected data.
1101 if (copy_to_user(p, blob, params.len))
1106 params.len = data.len;
1107 if (copy_to_user(report, &params, sizeof(params)))
1114 /* Userspace wants to query session length. */
1116 __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
1117 struct kvm_sev_send_start *params)
1119 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1120 struct sev_data_send_start data;
1123 memset(&data, 0, sizeof(data));
1124 data.handle = sev->handle;
1125 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1127 params->session_len = data.session_len;
1128 if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1129 sizeof(struct kvm_sev_send_start)))
1135 static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1137 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1138 struct sev_data_send_start data;
1139 struct kvm_sev_send_start params;
1140 void *amd_certs, *session_data;
1141 void *pdh_cert, *plat_certs;
1144 if (!sev_guest(kvm))
1147 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1148 sizeof(struct kvm_sev_send_start)))
1151 /* if session_len is zero, userspace wants to query the session length */
1152 if (!params.session_len)
1153 return __sev_send_start_query_session_length(kvm, argp,
1156 /* some sanity checks */
1157 if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
1158 !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
1161 /* allocate the memory to hold the session data blob */
1162 session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
1166 /* copy the certificate blobs from userspace */
1167 pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
1168 params.pdh_cert_len);
1169 if (IS_ERR(pdh_cert)) {
1170 ret = PTR_ERR(pdh_cert);
1171 goto e_free_session;
1174 plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
1175 params.plat_certs_len);
1176 if (IS_ERR(plat_certs)) {
1177 ret = PTR_ERR(plat_certs);
1181 amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
1182 params.amd_certs_len);
1183 if (IS_ERR(amd_certs)) {
1184 ret = PTR_ERR(amd_certs);
1185 goto e_free_plat_cert;
1188 /* populate the FW SEND_START field with system physical address */
1189 memset(&data, 0, sizeof(data));
1190 data.pdh_cert_address = __psp_pa(pdh_cert);
1191 data.pdh_cert_len = params.pdh_cert_len;
1192 data.plat_certs_address = __psp_pa(plat_certs);
1193 data.plat_certs_len = params.plat_certs_len;
1194 data.amd_certs_address = __psp_pa(amd_certs);
1195 data.amd_certs_len = params.amd_certs_len;
1196 data.session_address = __psp_pa(session_data);
1197 data.session_len = params.session_len;
1198 data.handle = sev->handle;
1200 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1202 if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
1203 session_data, params.session_len)) {
1205 goto e_free_amd_cert;
1208 params.policy = data.policy;
1209 params.session_len = data.session_len;
1210 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
1211 sizeof(struct kvm_sev_send_start)))
1221 kfree(session_data);
1225 /* Userspace wants to query either header or trans length. */
1227 __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1228 struct kvm_sev_send_update_data *params)
1230 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1231 struct sev_data_send_update_data data;
1234 memset(&data, 0, sizeof(data));
1235 data.handle = sev->handle;
1236 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1238 params->hdr_len = data.hdr_len;
1239 params->trans_len = data.trans_len;
1241 if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1242 sizeof(struct kvm_sev_send_update_data)))
1248 static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1250 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1251 struct sev_data_send_update_data data;
1252 struct kvm_sev_send_update_data params;
1253 void *hdr, *trans_data;
1254 struct page **guest_page;
1258 if (!sev_guest(kvm))
1261 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1262 sizeof(struct kvm_sev_send_update_data)))
1265 /* userspace wants to query either header or trans length */
1266 if (!params.trans_len || !params.hdr_len)
1267 return __sev_send_update_data_query_lengths(kvm, argp, &params);
1269 if (!params.trans_uaddr || !params.guest_uaddr ||
1270 !params.guest_len || !params.hdr_uaddr)
1273 /* Check if we are crossing the page boundary */
1274 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1275 if ((params.guest_len + offset > PAGE_SIZE))
1278 /* Pin guest memory */
1279 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1281 if (IS_ERR(guest_page))
1282 return PTR_ERR(guest_page);
1284 /* allocate memory for header and transport buffer */
1286 hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
1290 trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
1294 memset(&data, 0, sizeof(data));
1295 data.hdr_address = __psp_pa(hdr);
1296 data.hdr_len = params.hdr_len;
1297 data.trans_address = __psp_pa(trans_data);
1298 data.trans_len = params.trans_len;
1300 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */
1301 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1302 data.guest_address |= sev_me_mask;
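/*
 * sev_me_mask is the memory encryption (C-bit) mask; it is derived in
 * sev_hardware_setup() from the C-bit position reported in
 * CPUID 0x8000001F[EBX] bits 5:0.
 */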
1303 data.guest_len = params.guest_len;
1304 data.handle = sev->handle;
1306 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1309 goto e_free_trans_data;
1311 /* copy transport buffer to user space */
1312 if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
1313 trans_data, params.trans_len)) {
1315 goto e_free_trans_data;
1318 /* Copy packet header to userspace. */
1319 if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
1328 sev_unpin_memory(kvm, guest_page, n);
1333 static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1335 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1336 struct sev_data_send_finish data;
1338 if (!sev_guest(kvm))
1341 data.handle = sev->handle;
1342 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
1345 static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
1347 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1348 struct sev_data_send_cancel data;
1350 if (!sev_guest(kvm))
1353 data.handle = sev->handle;
1354 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
1357 static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1359 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1360 struct sev_data_receive_start start;
1361 struct kvm_sev_receive_start params;
1362 int *error = &argp->error;
1367 if (!sev_guest(kvm))
1370 /* Get the parameters from userspace */
1371 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1372 sizeof(struct kvm_sev_receive_start)))
1375 /* some sanity checks */
1376 if (!params.pdh_uaddr || !params.pdh_len ||
1377 !params.session_uaddr || !params.session_len)
1380 pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1381 if (IS_ERR(pdh_data))
1382 return PTR_ERR(pdh_data);
1384 session_data = psp_copy_user_blob(params.session_uaddr,
1385 params.session_len);
1386 if (IS_ERR(session_data)) {
1387 ret = PTR_ERR(session_data);
1391 memset(&start, 0, sizeof(start));
1392 start.handle = params.handle;
1393 start.policy = params.policy;
1394 start.pdh_cert_address = __psp_pa(pdh_data);
1395 start.pdh_cert_len = params.pdh_len;
1396 start.session_address = __psp_pa(session_data);
1397 start.session_len = params.session_len;
1399 /* create memory encryption context */
1400 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
1403 goto e_free_session;
1405 /* Bind ASID to this guest */
1406 ret = sev_bind_asid(kvm, start.handle, error);
1408 sev_decommission(start.handle);
1409 goto e_free_session;
1412 params.handle = start.handle;
1413 if (copy_to_user((void __user *)(uintptr_t)argp->data,
1414 &params, sizeof(struct kvm_sev_receive_start))) {
1416 sev_unbind_asid(kvm, start.handle);
1417 goto e_free_session;
1420 sev->handle = start.handle;
1421 sev->fd = argp->sev_fd;
1424 kfree(session_data);
1431 static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1433 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1434 struct kvm_sev_receive_update_data params;
1435 struct sev_data_receive_update_data data;
1436 void *hdr = NULL, *trans = NULL;
1437 struct page **guest_page;
1441 if (!sev_guest(kvm))
1444 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1445 sizeof(struct kvm_sev_receive_update_data)))
1448 if (!params.hdr_uaddr || !params.hdr_len ||
1449 !params.guest_uaddr || !params.guest_len ||
1450 !params.trans_uaddr || !params.trans_len)
1453 /* Check if we are crossing the page boundary */
1454 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1455 if ((params.guest_len + offset > PAGE_SIZE))
1458 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1460 return PTR_ERR(hdr);
1462 trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1463 if (IS_ERR(trans)) {
1464 ret = PTR_ERR(trans);
1468 memset(&data, 0, sizeof(data));
1469 data.hdr_address = __psp_pa(hdr);
1470 data.hdr_len = params.hdr_len;
1471 data.trans_address = __psp_pa(trans);
1472 data.trans_len = params.trans_len;
1474 /* Pin guest memory */
1475 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1477 if (IS_ERR(guest_page)) {
1478 ret = PTR_ERR(guest_page);
1482 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
1483 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1484 data.guest_address |= sev_me_mask;
1485 data.guest_len = params.guest_len;
1486 data.handle = sev->handle;
1488 ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
1491 sev_unpin_memory(kvm, guest_page, n);
1501 static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1503 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1504 struct sev_data_receive_finish data;
1506 if (!sev_guest(kvm))
1509 data.handle = sev->handle;
1510 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
1513 static bool cmd_allowed_from_mirror(u32 cmd_id)
1516 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA so that SEV-ES
1517 * mirror VMs can be made active. Also allow the debugging and status commands.
1519 if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
1520 cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
1521 cmd_id == KVM_SEV_DBG_ENCRYPT)
1527 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1529 struct kvm_sev_cmd sev_cmd;
1538 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1541 mutex_lock(&kvm->lock);
1543 /* Only the enc_context_owner handles some memory enc operations. */
1544 if (is_mirroring_enc_context(kvm) &&
1545 !cmd_allowed_from_mirror(sev_cmd.id)) {
1550 switch (sev_cmd.id) {
1551 case KVM_SEV_ES_INIT:
1552 if (!sev_es_enabled) {
1558 r = sev_guest_init(kvm, &sev_cmd);
1560 case KVM_SEV_LAUNCH_START:
1561 r = sev_launch_start(kvm, &sev_cmd);
1563 case KVM_SEV_LAUNCH_UPDATE_DATA:
1564 r = sev_launch_update_data(kvm, &sev_cmd);
1566 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1567 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1569 case KVM_SEV_LAUNCH_MEASURE:
1570 r = sev_launch_measure(kvm, &sev_cmd);
1572 case KVM_SEV_LAUNCH_FINISH:
1573 r = sev_launch_finish(kvm, &sev_cmd);
1575 case KVM_SEV_GUEST_STATUS:
1576 r = sev_guest_status(kvm, &sev_cmd);
1578 case KVM_SEV_DBG_DECRYPT:
1579 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1581 case KVM_SEV_DBG_ENCRYPT:
1582 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1584 case KVM_SEV_LAUNCH_SECRET:
1585 r = sev_launch_secret(kvm, &sev_cmd);
1587 case KVM_SEV_GET_ATTESTATION_REPORT:
1588 r = sev_get_attestation_report(kvm, &sev_cmd);
1590 case KVM_SEV_SEND_START:
1591 r = sev_send_start(kvm, &sev_cmd);
1593 case KVM_SEV_SEND_UPDATE_DATA:
1594 r = sev_send_update_data(kvm, &sev_cmd);
1596 case KVM_SEV_SEND_FINISH:
1597 r = sev_send_finish(kvm, &sev_cmd);
1599 case KVM_SEV_SEND_CANCEL:
1600 r = sev_send_cancel(kvm, &sev_cmd);
1602 case KVM_SEV_RECEIVE_START:
1603 r = sev_receive_start(kvm, &sev_cmd);
1605 case KVM_SEV_RECEIVE_UPDATE_DATA:
1606 r = sev_receive_update_data(kvm, &sev_cmd);
1608 case KVM_SEV_RECEIVE_FINISH:
1609 r = sev_receive_finish(kvm, &sev_cmd);
1616 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1620 mutex_unlock(&kvm->lock);
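/*
 * For reference, a minimal userspace sketch of driving this dispatcher
 * through the KVM_MEMORY_ENCRYPT_OP ioctl (illustrative only; error
 * handling and the usual KVM VM / SEV fd setup are omitted):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_INIT,
 *		.data   = 0,		// command-specific payload pointer, unused here
 *		.sev_fd = sev_fd,	// fd of /dev/sev
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *	// on failure, cmd.error holds the SEV firmware error code
 */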
1624 int svm_register_enc_region(struct kvm *kvm,
1625 struct kvm_enc_region *range)
1627 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1628 struct enc_region *region;
1631 if (!sev_guest(kvm))
1634 /* If kvm is mirroring encryption context it isn't responsible for it */
1635 if (is_mirroring_enc_context(kvm))
1638 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1641 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1645 mutex_lock(&kvm->lock);
1646 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1647 if (IS_ERR(region->pages)) {
1648 ret = PTR_ERR(region->pages);
1649 mutex_unlock(&kvm->lock);
1653 region->uaddr = range->addr;
1654 region->size = range->size;
1656 list_add_tail(&region->list, &sev->regions_list);
1657 mutex_unlock(&kvm->lock);
1660 * The guest may change the memory encryption attribute from C=0 -> C=1
1661 * or vice versa for this memory range. Let's make sure caches are
1662 * flushed to ensure that guest data gets written into memory with
1665 sev_clflush_pages(region->pages, region->npages);
1674 static struct enc_region *
1675 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1677 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1678 struct list_head *head = &sev->regions_list;
1679 struct enc_region *i;
1681 list_for_each_entry(i, head, list) {
1682 if (i->uaddr == range->addr &&
1683 i->size == range->size)
1690 static void __unregister_enc_region_locked(struct kvm *kvm,
1691 struct enc_region *region)
1693 sev_unpin_memory(kvm, region->pages, region->npages);
1694 list_del(&region->list);
1698 int svm_unregister_enc_region(struct kvm *kvm,
1699 struct kvm_enc_region *range)
1701 struct enc_region *region;
1704 /* If kvm is mirroring encryption context it isn't responsible for it */
1705 if (is_mirroring_enc_context(kvm))
1708 mutex_lock(&kvm->lock);
1710 if (!sev_guest(kvm)) {
1715 region = find_enc_region(kvm, range);
1722 * Ensure that all guest tagged cache entries are flushed before
1723 * releasing the pages back to the system for use. CLFLUSH will
1724 * not do this, so issue a WBINVD.
1726 wbinvd_on_all_cpus();
1728 __unregister_enc_region_locked(kvm, region);
1730 mutex_unlock(&kvm->lock);
1734 mutex_unlock(&kvm->lock);
1738 int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
1740 struct file *source_kvm_file;
1741 struct kvm *source_kvm;
1742 struct kvm_sev_info source_sev, *mirror_sev;
1745 source_kvm_file = fget(source_fd);
1746 if (!file_is_kvm(source_kvm_file)) {
1751 source_kvm = source_kvm_file->private_data;
1752 mutex_lock(&source_kvm->lock);
1754 if (!sev_guest(source_kvm)) {
1756 goto e_source_unlock;
1759 /* Mirrors of mirrors should work, but let's not get silly */
1760 if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
1762 goto e_source_unlock;
1765 memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
1766 sizeof(source_sev));
1769 * The mirror kvm holds an enc_context_owner ref so its asid can't
1770 * disappear until we're done with it
1772 kvm_get_kvm(source_kvm);
1774 fput(source_kvm_file);
1775 mutex_unlock(&source_kvm->lock);
1776 mutex_lock(&kvm->lock);
1778 if (sev_guest(kvm)) {
1780 goto e_mirror_unlock;
1783 /* Set enc_context_owner and copy its encryption context over */
1784 mirror_sev = &to_kvm_svm(kvm)->sev_info;
1785 mirror_sev->enc_context_owner = source_kvm;
1786 mirror_sev->active = true;
1787 mirror_sev->asid = source_sev.asid;
1788 mirror_sev->fd = source_sev.fd;
1789 mirror_sev->es_active = source_sev.es_active;
1790 mirror_sev->handle = source_sev.handle;
1792 * Do not copy ap_jump_table. Since the mirror does not share the same
1793 * KVM contexts as the original, and they may have different
1797 mutex_unlock(&kvm->lock);
1801 mutex_unlock(&kvm->lock);
1802 kvm_put_kvm(source_kvm);
1805 mutex_unlock(&source_kvm->lock);
1807 if (source_kvm_file)
1808 fput(source_kvm_file);
1812 void sev_vm_destroy(struct kvm *kvm)
1814 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1815 struct list_head *head = &sev->regions_list;
1816 struct list_head *pos, *q;
1818 if (!sev_guest(kvm))
1821 /* If this is a mirror_kvm, release the enc_context_owner and skip SEV cleanup */
1822 if (is_mirroring_enc_context(kvm)) {
1823 kvm_put_kvm(sev->enc_context_owner);
1827 mutex_lock(&kvm->lock);
1830 * Ensure that all guest tagged cache entries are flushed before
1831 * releasing the pages back to the system for use. CLFLUSH will
1832 * not do this, so issue a WBINVD.
1834 wbinvd_on_all_cpus();
1837 * If userspace was terminated before unregistering the memory regions,
1838 * then let's unpin all the registered memory.
1840 if (!list_empty(head)) {
1841 list_for_each_safe(pos, q, head) {
1842 __unregister_enc_region_locked(kvm,
1843 list_entry(pos, struct enc_region, list));
1848 mutex_unlock(&kvm->lock);
1850 sev_unbind_asid(kvm, sev->handle);
1854 void __init sev_set_cpu_caps(void)
1857 kvm_cpu_cap_clear(X86_FEATURE_SEV);
1858 if (!sev_es_enabled)
1859 kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
1862 void __init sev_hardware_setup(void)
1864 #ifdef CONFIG_KVM_AMD_SEV
1865 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
1866 bool sev_es_supported = false;
1867 bool sev_supported = false;
1869 if (!sev_enabled || !npt_enabled)
1872 /* Does the CPU support SEV? */
1873 if (!boot_cpu_has(X86_FEATURE_SEV))
1876 /* Retrieve SEV CPUID information */
1877 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1879 /* Set encryption bit location for SEV-ES guests */
1880 sev_enc_bit = ebx & 0x3f;
1882 /* Maximum number of encrypted guests supported simultaneously */
1887 /* Minimum ASID value that should be used for SEV guest */
1889 sev_me_mask = 1UL << (ebx & 0x3f);
1892 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
1893 * even though it's never used, so that the bitmap is indexed by the
1896 nr_asids = max_sev_asid + 1;
1897 sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
1898 if (!sev_asid_bitmap)
1901 sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
1902 if (!sev_reclaim_asid_bitmap) {
1903 bitmap_free(sev_asid_bitmap);
1904 sev_asid_bitmap = NULL;
1908 sev_asid_count = max_sev_asid - min_sev_asid + 1;
1909 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
1912 pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
1913 sev_supported = true;
1915 /* SEV-ES support requested? */
1916 if (!sev_es_enabled)
1919 /* Does the CPU support SEV-ES? */
1920 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
1923 /* Has the system been allocated ASIDs for SEV-ES? */
1924 if (min_sev_asid == 1)
1927 sev_es_asid_count = min_sev_asid - 1;
1928 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
1931 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
1932 sev_es_supported = true;
1935 sev_enabled = sev_supported;
1936 sev_es_enabled = sev_es_supported;
1940 void sev_hardware_teardown(void)
1945 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
1946 sev_flush_asids(1, max_sev_asid);
1948 bitmap_free(sev_asid_bitmap);
1949 bitmap_free(sev_reclaim_asid_bitmap);
1951 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
1952 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
1955 int sev_cpu_init(struct svm_cpu_data *sd)
1960 sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
1968 * Pages used by hardware to hold guest encrypted state must be flushed before
1969 * returning them to the system.
1971 static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
1975 * If hardware enforced cache coherency for encrypted mappings of the
1976 * same physical page is supported, nothing to do.
1978 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
1982 * If the VM Page Flush MSR is supported, use it to flush the page
1983 * (using the page virtual address and the guest ASID).
1985 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
1986 struct kvm_sev_info *sev;
1987 unsigned long va_start;
1990 /* Align start and stop to page boundaries. */
1991 va_start = (unsigned long)va;
1992 start = (u64)va_start & PAGE_MASK;
1993 stop = PAGE_ALIGN((u64)va_start + len);
1996 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
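/*
 * One page is flushed per iteration of the loop below; per the APM, the
 * value written to MSR_AMD64_VM_PAGE_FLUSH is expected to combine the
 * page's virtual address with the guest ASID.
 */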
1998 while (start < stop) {
1999 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
2008 WARN(1, "Address overflow, using WBINVD\n");
2012 * Hardware should always have one of the above features,
2013 * but if not, use WBINVD and issue a warning.
2015 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
2016 wbinvd_on_all_cpus();
2019 void sev_free_vcpu(struct kvm_vcpu *vcpu)
2021 struct vcpu_svm *svm;
2023 if (!sev_es_guest(vcpu->kvm))
2028 if (vcpu->arch.guest_state_protected)
2029 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
2030 __free_page(virt_to_page(svm->vmsa));
2032 if (svm->ghcb_sa_free)
2033 kfree(svm->ghcb_sa);
2036 static void dump_ghcb(struct vcpu_svm *svm)
2038 struct ghcb *ghcb = svm->ghcb;
2041 /* Re-use the dump_invalid_vmcb module parameter */
2042 if (!dump_invalid_vmcb) {
2043 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2047 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2049 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2050 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2051 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2052 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2053 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2054 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2055 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2056 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2057 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2058 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2061 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2063 struct kvm_vcpu *vcpu = &svm->vcpu;
2064 struct ghcb *ghcb = svm->ghcb;
2067 * The GHCB protocol so far allows for the following data
2069 * GPRs RAX, RBX, RCX, RDX
2071 * Copy their values, even if they may not have been written during the
2072 * VM-Exit. It's the guest's responsibility to not consume random data.
2074 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2075 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2076 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2077 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
2080 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2082 struct vmcb_control_area *control = &svm->vmcb->control;
2083 struct kvm_vcpu *vcpu = &svm->vcpu;
2084 struct ghcb *ghcb = svm->ghcb;
2088 * The GHCB protocol so far allows for the following data
2090 * GPRs RAX, RBX, RCX, RDX
2094 * VMMCALL allows the guest to provide extra registers. KVM also
2095 * expects RSI for hypercalls, so include that, too.
2097 * Copy their values to the appropriate location if supplied.
2099 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2101 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
2102 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
2103 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
2104 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
2105 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
2107 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
2109 if (ghcb_xcr0_is_valid(ghcb)) {
2110 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2111 kvm_update_cpuid_runtime(vcpu);
2114 /* Copy the GHCB exit information into the VMCB fields */
2115 exit_code = ghcb_get_sw_exit_code(ghcb);
2116 control->exit_code = lower_32_bits(exit_code);
2117 control->exit_code_hi = upper_32_bits(exit_code);
2118 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2119 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
2121 /* Clear the valid entries fields */
2122 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2125 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2127 struct kvm_vcpu *vcpu;
2133 /* Only GHCB Usage code 0 is supported */
2134 if (ghcb->ghcb_usage)
2138 * Retrieve the exit code now even though it may not be marked valid
2139 * as it could help with debugging.
2141 exit_code = ghcb_get_sw_exit_code(ghcb);
2143 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2144 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2145 !ghcb_sw_exit_info_2_is_valid(ghcb))
2148 switch (ghcb_get_sw_exit_code(ghcb)) {
2149 case SVM_EXIT_READ_DR7:
2151 case SVM_EXIT_WRITE_DR7:
2152 if (!ghcb_rax_is_valid(ghcb))
2155 case SVM_EXIT_RDTSC:
2157 case SVM_EXIT_RDPMC:
2158 if (!ghcb_rcx_is_valid(ghcb))
2161 case SVM_EXIT_CPUID:
2162 if (!ghcb_rax_is_valid(ghcb) ||
2163 !ghcb_rcx_is_valid(ghcb))
2165 if (ghcb_get_rax(ghcb) == 0xd)
2166 if (!ghcb_xcr0_is_valid(ghcb))
2172 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2173 if (!ghcb_sw_scratch_is_valid(ghcb))
2176 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2177 if (!ghcb_rax_is_valid(ghcb))
2182 if (!ghcb_rcx_is_valid(ghcb))
2184 if (ghcb_get_sw_exit_info_1(ghcb)) {
2185 if (!ghcb_rax_is_valid(ghcb) ||
2186 !ghcb_rdx_is_valid(ghcb))
2190 case SVM_EXIT_VMMCALL:
2191 if (!ghcb_rax_is_valid(ghcb) ||
2192 !ghcb_cpl_is_valid(ghcb))
2195 case SVM_EXIT_RDTSCP:
2197 case SVM_EXIT_WBINVD:
2199 case SVM_EXIT_MONITOR:
2200 if (!ghcb_rax_is_valid(ghcb) ||
2201 !ghcb_rcx_is_valid(ghcb) ||
2202 !ghcb_rdx_is_valid(ghcb))
2205 case SVM_EXIT_MWAIT:
2206 if (!ghcb_rax_is_valid(ghcb) ||
2207 !ghcb_rcx_is_valid(ghcb))
2210 case SVM_VMGEXIT_MMIO_READ:
2211 case SVM_VMGEXIT_MMIO_WRITE:
2212 if (!ghcb_sw_scratch_is_valid(ghcb))
2215 case SVM_VMGEXIT_NMI_COMPLETE:
2216 case SVM_VMGEXIT_AP_HLT_LOOP:
2217 case SVM_VMGEXIT_AP_JUMP_TABLE:
2218 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2229 if (ghcb->ghcb_usage) {
2230 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2233 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
2238 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2239 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2240 vcpu->run->internal.ndata = 2;
2241 vcpu->run->internal.data[0] = exit_code;
2242 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2247 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
2252 if (svm->ghcb_sa_free) {
2254 * The scratch area lives outside the GHCB, so there is a
2255 * buffer that, depending on the operation performed, may
2256 * need to be synced, then freed.
2258 if (svm->ghcb_sa_sync) {
2259 kvm_write_guest(svm->vcpu.kvm,
2260 ghcb_get_sw_scratch(svm->ghcb),
2261 svm->ghcb_sa, svm->ghcb_sa_len);
2262 svm->ghcb_sa_sync = false;
2265 kfree(svm->ghcb_sa);
2266 svm->ghcb_sa = NULL;
2267 svm->ghcb_sa_free = false;
2270 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2272 sev_es_sync_to_ghcb(svm);
2274 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2278 void pre_sev_run(struct vcpu_svm *svm, int cpu)
2280 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2281 int asid = sev_get_asid(svm->vcpu.kvm);
2283 /* Assign the asid allocated with this SEV guest */
2289 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
2290 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
2292 if (sd->sev_vmcbs[asid] == svm->vmcb &&
2293 svm->vcpu.arch.last_vmentry_cpu == cpu)
2296 sd->sev_vmcbs[asid] = svm->vmcb;
2297 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
2298 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
2301 #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
2302 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2304 struct vmcb_control_area *control = &svm->vmcb->control;
2305 struct ghcb *ghcb = svm->ghcb;
2306 u64 ghcb_scratch_beg, ghcb_scratch_end;
2307 u64 scratch_gpa_beg, scratch_gpa_end;
2310 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2311 if (!scratch_gpa_beg) {
2312 pr_err("vmgexit: scratch gpa not provided\n");
2316 scratch_gpa_end = scratch_gpa_beg + len;
2317 if (scratch_gpa_end < scratch_gpa_beg) {
2318 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2319 len, scratch_gpa_beg);
2323 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2324 /* Scratch area begins within GHCB */
2325 ghcb_scratch_beg = control->ghcb_gpa +
2326 offsetof(struct ghcb, shared_buffer);
2327 ghcb_scratch_end = control->ghcb_gpa +
2328 offsetof(struct ghcb, reserved_1);
2331 * If the scratch area begins within the GHCB, it must be
2332 * completely contained in the GHCB shared buffer area.
2334 if (scratch_gpa_beg < ghcb_scratch_beg ||
2335 scratch_gpa_end > ghcb_scratch_end) {
2336 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2337 scratch_gpa_beg, scratch_gpa_end);
2341 scratch_va = (void *)svm->ghcb;
2342 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2345 * The guest memory must be read into a kernel buffer, so
2348 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2349 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2350 len, GHCB_SCRATCH_AREA_LIMIT);
2353 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
2357 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2358 /* Unable to copy scratch area from guest */
2359 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2366 * The scratch area is outside the GHCB. The operation will
2367 * dictate whether the buffer needs to be synced before running
2368 * the vCPU next time (i.e. a read was requested so the data
2369 * must be written back to the guest memory).
2371 svm->ghcb_sa_sync = sync;
2372 svm->ghcb_sa_free = true;
2375 svm->ghcb_sa = scratch_va;
2376 svm->ghcb_sa_len = len;
2381 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2384 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2385 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2388 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2390 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2393 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2395 svm->vmcb->control.ghcb_gpa = value;
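/*
 * The set/get_ghcb_msr*() helpers above operate on the GHCB MSR value
 * mirrored in control->ghcb_gpa: the bits covered by GHCB_MSR_INFO_MASK
 * carry the request/response code and the remaining bits carry
 * operation-specific data, per the GHCB specification. For example,
 * sev_handle_vmgexit_msr_protocol() below extracts the CPUID function of
 * a GHCB_MSR_CPUID_REQ with
 * get_ghcb_msr_bits(svm, GHCB_MSR_CPUID_FUNC_MASK, GHCB_MSR_CPUID_FUNC_POS).
 */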
2398 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2400 struct vmcb_control_area *control = &svm->vmcb->control;
2401 struct kvm_vcpu *vcpu = &svm->vcpu;
2405 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2407 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2410 switch (ghcb_info) {
2411 case GHCB_MSR_SEV_INFO_REQ:
2412 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2416 case GHCB_MSR_CPUID_REQ: {
2417 u64 cpuid_fn, cpuid_reg, cpuid_value;
2419 cpuid_fn = get_ghcb_msr_bits(svm,
2420 GHCB_MSR_CPUID_FUNC_MASK,
2421 GHCB_MSR_CPUID_FUNC_POS);
2423 /* Initialize the registers needed by the CPUID intercept */
2424 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2425 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2427 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
2433 cpuid_reg = get_ghcb_msr_bits(svm,
2434 GHCB_MSR_CPUID_REG_MASK,
2435 GHCB_MSR_CPUID_REG_POS);
2437 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2438 else if (cpuid_reg == 1)
2439 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2440 else if (cpuid_reg == 2)
2441 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2443 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2445 set_ghcb_msr_bits(svm, cpuid_value,
2446 GHCB_MSR_CPUID_VALUE_MASK,
2447 GHCB_MSR_CPUID_VALUE_POS);
2449 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2454 case GHCB_MSR_TERM_REQ: {
2455 u64 reason_set, reason_code;
2457 reason_set = get_ghcb_msr_bits(svm,
2458 GHCB_MSR_TERM_REASON_SET_MASK,
2459 GHCB_MSR_TERM_REASON_SET_POS);
2460 reason_code = get_ghcb_msr_bits(svm,
2461 GHCB_MSR_TERM_REASON_MASK,
2462 GHCB_MSR_TERM_REASON_POS);
2463 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2464 reason_set, reason_code);
2471 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2472 control->ghcb_gpa, ret);
2477 int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
2479 struct vcpu_svm *svm = to_svm(vcpu);
2480 struct vmcb_control_area *control = &svm->vmcb->control;
2481 u64 ghcb_gpa, exit_code;
2485 /* Validate the GHCB */
2486 ghcb_gpa = control->ghcb_gpa;
2487 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2488 return sev_handle_vmgexit_msr_protocol(svm);
2491 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
2495 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
2496 /* Unable to map GHCB from guest */
2497 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
2502 svm->ghcb = svm->ghcb_map.hva;
2503 ghcb = svm->ghcb_map.hva;
2505 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
2507 exit_code = ghcb_get_sw_exit_code(ghcb);
2509 ret = sev_es_validate_vmgexit(svm);
2513 sev_es_sync_from_ghcb(svm);
2514 ghcb_set_sw_exit_info_1(ghcb, 0);
2515 ghcb_set_sw_exit_info_2(ghcb, 0);
2518 switch (exit_code) {
2519 case SVM_VMGEXIT_MMIO_READ:
2520 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
2523 ret = kvm_sev_es_mmio_read(vcpu,
2524 control->exit_info_1,
2525 control->exit_info_2,
2528 case SVM_VMGEXIT_MMIO_WRITE:
2529 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2532 ret = kvm_sev_es_mmio_write(vcpu,
2533 control->exit_info_1,
2534 control->exit_info_2,
2537 case SVM_VMGEXIT_NMI_COMPLETE:
2538 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
2540 case SVM_VMGEXIT_AP_HLT_LOOP:
2541 ret = kvm_emulate_ap_reset_hold(vcpu);
2543 case SVM_VMGEXIT_AP_JUMP_TABLE: {
2544 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
2546 switch (control->exit_info_1) {
2548 /* Set AP jump table address */
2549 sev->ap_jump_table = control->exit_info_2;
2552 /* Get AP jump table address */
2553 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2556 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2557 control->exit_info_1);
2558 ghcb_set_sw_exit_info_1(ghcb, 1);
2559 ghcb_set_sw_exit_info_2(ghcb,
2561 SVM_EVTINJ_TYPE_EXEPT |
2568 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2570 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2571 control->exit_info_1, control->exit_info_2);
2574 ret = svm_invoke_exit_handler(vcpu, exit_code);
2580 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2582 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2585 return kvm_sev_es_string_io(&svm->vcpu, size, port,
2586 svm->ghcb_sa, svm->ghcb_sa_len, in);
2589 void sev_es_init_vmcb(struct vcpu_svm *svm)
2591 struct kvm_vcpu *vcpu = &svm->vcpu;
2593 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2594 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2597 * An SEV-ES guest requires a VMSA area that is separate from the
2598 * VMCB page. Do not include the encryption mask on the VMSA physical
2599 * address since hardware will access it using the guest key.
2601 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2603 /* Can't intercept CR register access, HV can't modify CR registers */
2604 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2605 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2606 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2607 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2608 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2609 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2611 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2613 /* Track EFER/CR register changes */
2614 svm_set_intercept(svm, TRAP_EFER_WRITE);
2615 svm_set_intercept(svm, TRAP_CR0_WRITE);
2616 svm_set_intercept(svm, TRAP_CR4_WRITE);
2617 svm_set_intercept(svm, TRAP_CR8_WRITE);
2619 /* No support for enable_vmware_backdoor */
2620 clr_exception_intercept(svm, GP_VECTOR);
2622 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2623 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2625 /* Clear intercepts on selected MSRs */
2626 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2627 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2628 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2629 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2630 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2631 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2634 void sev_es_create_vcpu(struct vcpu_svm *svm)
2637 * Set the GHCB MSR value as per the GHCB specification when creating
2638 * a vCPU for an SEV-ES guest.
2640 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2645 void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
2647 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2648 struct vmcb_save_area *hostsa;
2651 * For an SEV-ES guest, hardware will restore the host state on VMEXIT,
2652 * one step of which is to perform a VMLOAD. Since hardware does not
2653 * perform a VMSAVE on VMRUN, the host save area must be updated.
2655 vmsave(__sme_page_pa(sd->save_area));
2657 /* XCR0 is restored on VMEXIT, save the current host value */
2658 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2659 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2661 /* PKRU is restored on VMEXIT, save the current host value */
2662 hostsa->pkru = read_pkru();
2664 /* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2665 hostsa->xss = host_xss;
2668 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2670 struct vcpu_svm *svm = to_svm(vcpu);
2672 /* First SIPI: Use the values as initially set by the VMM */
2673 if (!svm->received_first_sipi) {
2674 svm->received_first_sipi = true;
2679 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2680 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2686 ghcb_set_sw_exit_info_2(svm->ghcb, 1);