1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/misc_cgroup.h>
18 #include <linux/processor.h>
19 #include <linux/trace_events.h>
20 #include <asm/fpu/internal.h>
23 #include <asm/trapnr.h>
31 #define __ex(x) __kvm_handle_fault_on_reboot(x)
33 #ifndef CONFIG_KVM_AMD_SEV
35 * When this config is not defined, the SEV feature is not supported and the APIs in
36 * this file are not used, but the file still gets compiled into the KVM AMD module.
39 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
40 * misc_res_type {} defined in linux/misc_cgroup.h.
42 * Below macros allow compilation to succeed.
44 #define MISC_CG_RES_SEV MISC_CG_RES_TYPES
45 #define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
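/*
 * Editorial note: with CONFIG_KVM_AMD_SEV disabled the two stubs above simply
 * alias to MISC_CG_RES_TYPES so that the misc-cgroup calls in this file keep
 * compiling; sev_enabled/sev_es_enabled are also forced to false below, so
 * none of those paths are actually exercised in that configuration.
 */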
48 #ifdef CONFIG_KVM_AMD_SEV
49 /* enable/disable SEV support */
50 static bool sev_enabled = true;
51 module_param_named(sev, sev_enabled, bool, 0444);
53 /* enable/disable SEV-ES support */
54 static bool sev_es_enabled = true;
55 module_param_named(sev_es, sev_es_enabled, bool, 0444);
57 #define sev_enabled false
58 #define sev_es_enabled false
59 #endif /* CONFIG_KVM_AMD_SEV */
61 static u8 sev_enc_bit;
62 static DECLARE_RWSEM(sev_deactivate_lock);
63 static DEFINE_MUTEX(sev_bitmap_lock);
64 unsigned int max_sev_asid;
65 static unsigned int min_sev_asid;
66 static unsigned long sev_me_mask;
67 static unsigned long *sev_asid_bitmap;
68 static unsigned long *sev_reclaim_asid_bitmap;
71 struct list_head list;
78 /* Called with the sev_bitmap_lock held, or on shutdown */
79 static int sev_flush_asids(int min_asid, int max_asid)
81 int ret, pos, error = 0;
83 /* Check if there are any ASIDs to reclaim before performing a flush */
84 pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
89 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
90 * so it must be guarded.
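/*
 * sev_deactivate_lock is taken for write here and for read around each
 * DEACTIVATE in sev_unbind_asid(), so a DF_FLUSH can never race with an
 * in-flight DEACTIVATE.
 */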
92 down_write(&sev_deactivate_lock);
95 ret = sev_guest_df_flush(&error);
97 up_write(&sev_deactivate_lock);
100 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
105 static inline bool is_mirroring_enc_context(struct kvm *kvm)
107 return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
110 /* Must be called with the sev_bitmap_lock held */
111 static bool __sev_recycle_asids(int min_asid, int max_asid)
113 if (sev_flush_asids(min_asid, max_asid))
116 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
117 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
119 bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
124 static int sev_asid_new(struct kvm_sev_info *sev)
126 int pos, min_asid, max_asid, ret;
128 enum misc_res_type type;
130 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
131 WARN_ON(sev->misc_cg);
132 sev->misc_cg = get_current_misc_cg();
133 ret = misc_cg_try_charge(type, sev->misc_cg, 1);
135 put_misc_cg(sev->misc_cg);
140 mutex_lock(&sev_bitmap_lock);
143 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
144 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
146 min_asid = sev->es_active ? 0 : min_sev_asid - 1;
147 max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
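/*
 * Editorial note: the ranges are asymmetric on purpose. SEV-ES guests are
 * restricted to the low ASID range [1, min_sev_asid - 1] while plain SEV
 * guests use [min_sev_asid, max_sev_asid]. The bitmap search below appears
 * to track ASID N at bit N - 1, hence the "- 1" adjustments (assumption
 * based on this fragment, not verified against the full file).
 */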
149 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
150 if (pos >= max_asid) {
151 if (retry && __sev_recycle_asids(min_asid, max_asid)) {
155 mutex_unlock(&sev_bitmap_lock);
160 __set_bit(pos, sev_asid_bitmap);
162 mutex_unlock(&sev_bitmap_lock);
166 misc_cg_uncharge(type, sev->misc_cg, 1);
167 put_misc_cg(sev->misc_cg);
172 static int sev_get_asid(struct kvm *kvm)
174 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
179 static void sev_asid_free(struct kvm_sev_info *sev)
181 struct svm_cpu_data *sd;
183 enum misc_res_type type;
185 mutex_lock(&sev_bitmap_lock);
188 __set_bit(pos, sev_reclaim_asid_bitmap);
190 for_each_possible_cpu(cpu) {
191 sd = per_cpu(svm_data, cpu);
192 sd->sev_vmcbs[pos] = NULL;
195 mutex_unlock(&sev_bitmap_lock);
197 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
198 misc_cg_uncharge(type, sev->misc_cg, 1);
199 put_misc_cg(sev->misc_cg);
203 static void sev_decommission(unsigned int handle)
205 struct sev_data_decommission decommission;
210 decommission.handle = handle;
211 sev_guest_decommission(&decommission, NULL);
214 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
216 struct sev_data_deactivate deactivate;
221 deactivate.handle = handle;
223 /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
224 down_read(&sev_deactivate_lock);
225 sev_guest_deactivate(&deactivate, NULL);
226 up_read(&sev_deactivate_lock);
228 sev_decommission(handle);
231 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
233 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
234 bool es_active = argp->id == KVM_SEV_ES_INIT;
237 if (kvm->created_vcpus)
241 if (unlikely(sev->active))
244 sev->es_active = es_active;
245 asid = sev_asid_new(sev);
250 ret = sev_platform_init(&argp->error);
256 INIT_LIST_HEAD(&sev->regions_list);
264 sev->es_active = false;
268 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
270 struct sev_data_activate activate;
271 int asid = sev_get_asid(kvm);
274 /* activate ASID on the given handle */
275 activate.handle = handle;
276 activate.asid = asid;
277 ret = sev_guest_activate(&activate, error);
282 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
291 ret = sev_issue_cmd_external_user(f.file, id, data, error);
297 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
299 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
301 return __sev_issue_cmd(sev->fd, id, data, error);
304 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
306 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
307 struct sev_data_launch_start start;
308 struct kvm_sev_launch_start params;
309 void *dh_blob, *session_blob;
310 int *error = &argp->error;
316 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
319 memset(&start, 0, sizeof(start));
322 if (params.dh_uaddr) {
323 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
325 return PTR_ERR(dh_blob);
327 start.dh_cert_address = __sme_set(__pa(dh_blob));
328 start.dh_cert_len = params.dh_len;
332 if (params.session_uaddr) {
333 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
334 if (IS_ERR(session_blob)) {
335 ret = PTR_ERR(session_blob);
339 start.session_address = __sme_set(__pa(session_blob));
340 start.session_len = params.session_len;
343 start.handle = params.handle;
344 start.policy = params.policy;
346 /* create memory encryption context */
347 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
351 /* Bind ASID to this guest */
352 ret = sev_bind_asid(kvm, start.handle, error);
354 sev_decommission(start.handle);
358 /* return handle to userspace */
359 params.handle = start.handle;
360 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
361 sev_unbind_asid(kvm, start.handle);
366 sev->handle = start.handle;
367 sev->fd = argp->sev_fd;
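/*
 * Summary of the LAUNCH_START flow above: copy the userspace parameters,
 * optionally copy in the guest owner's DH certificate and session blobs,
 * ask the PSP to create the memory encryption context (LAUNCH_START),
 * bind the freshly created handle to this VM's ASID (ACTIVATE), and hand
 * the handle back to userspace.
 */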
376 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
377 unsigned long ulen, unsigned long *n,
380 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
381 unsigned long npages, size;
383 unsigned long locked, lock_limit;
385 unsigned long first, last;
388 lockdep_assert_held(&kvm->lock);
390 if (ulen == 0 || uaddr + ulen < uaddr)
391 return ERR_PTR(-EINVAL);
393 /* Calculate number of pages. */
394 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
395 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
396 npages = (last - first + 1);
398 locked = sev->pages_locked + npages;
399 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
400 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
401 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
402 return ERR_PTR(-ENOMEM);
405 if (WARN_ON_ONCE(npages > INT_MAX))
406 return ERR_PTR(-EINVAL);
408 /* Avoid using vmalloc for smaller buffers. */
409 size = npages * sizeof(struct page *);
410 if (size > PAGE_SIZE)
411 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
413 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
416 return ERR_PTR(-ENOMEM);
418 /* Pin the user virtual address. */
419 npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
420 if (npinned != npages) {
421 pr_err("SEV: Failure locking %lu pages.\n", npages);
427 sev->pages_locked = locked;
433 unpin_user_pages(pages, npinned);
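/*
 * Pinning contract (as visible above): sev_pin_memory() is called with
 * kvm->lock held, charges the pinned pages against RLIMIT_MEMLOCK via
 * sev->pages_locked, and returns an ERR_PTR() on failure; every successful
 * call must eventually be paired with sev_unpin_memory() below.
 */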
439 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
440 unsigned long npages)
442 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
444 unpin_user_pages(pages, npages);
446 sev->pages_locked -= npages;
449 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
451 uint8_t *page_virtual;
454 if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
458 for (i = 0; i < npages; i++) {
459 page_virtual = kmap_atomic(pages[i]);
460 clflush_cache_range(page_virtual, PAGE_SIZE);
461 kunmap_atomic(page_virtual);
465 static unsigned long get_num_contig_pages(unsigned long idx,
466 struct page **inpages, unsigned long npages)
468 unsigned long paddr, next_paddr;
469 unsigned long i = idx + 1, pages = 1;
471 /* find the number of contiguous pages starting from idx */
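/*
 * Illustrative example: if inpages[0..2] have (SME-adjusted) physical
 * addresses 0x1000, 0x2000 and 0x4000, then get_num_contig_pages(0, inpages, 3)
 * returns 2, because only the first two pages are physically contiguous.
 */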
472 paddr = __sme_page_pa(inpages[idx]);
474 next_paddr = __sme_page_pa(inpages[i++]);
475 if ((paddr + PAGE_SIZE) == next_paddr) {
486 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
488 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
489 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
490 struct kvm_sev_launch_update_data params;
491 struct sev_data_launch_update_data data;
492 struct page **inpages;
498 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
501 vaddr = params.uaddr;
503 vaddr_end = vaddr + size;
505 /* Lock the user memory. */
506 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
508 return PTR_ERR(inpages);
511 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
512 * place; the cache may contain the data that was written unencrypted.
514 sev_clflush_pages(inpages, npages);
517 data.handle = sev->handle;
519 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
523 * If the user buffer is not page-aligned, calculate the offset within the page.
526 offset = vaddr & (PAGE_SIZE - 1);
528 /* Calculate the number of pages that can be encrypted in one go. */
529 pages = get_num_contig_pages(i, inpages, npages);
531 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
534 data.address = __sme_page_pa(inpages[i]) + offset;
535 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
540 next_vaddr = vaddr + len;
544 /* content of memory is updated, mark pages dirty */
545 for (i = 0; i < npages; i++) {
546 set_page_dirty_lock(inpages[i]);
547 mark_page_accessed(inpages[i]);
549 /* unlock the user pages */
550 sev_unpin_memory(kvm, inpages, npages);
554 static int sev_es_sync_vmsa(struct vcpu_svm *svm)
556 struct vmcb_save_area *save = &svm->vmcb->save;
558 /* Check some debug related fields before encrypting the VMSA */
559 if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
562 /* Sync registers */
563 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
564 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
565 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
566 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
567 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
568 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
569 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
570 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
572 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
573 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
574 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
575 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
576 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
577 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
578 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
579 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
581 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
583 /* Sync some non-GPR registers before encrypting */
584 save->xcr0 = svm->vcpu.arch.xcr0;
585 save->pkru = svm->vcpu.arch.pkru;
586 save->xss = svm->vcpu.arch.ia32_xss;
589 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
590 * the traditional VMSA that is part of the VMCB. Copy the
591 * traditional VMSA as it has been built so far (in prep
592 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
594 memcpy(svm->vmsa, save, sizeof(*save));
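/*
 * Once LAUNCH_UPDATE_VMSA (see below) encrypts this copy with the guest's
 * key, the register state is owned by the guest: the caller marks the vCPU
 * as guest_state_protected and KVM can no longer read or modify it.
 */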
599 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
601 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
602 struct sev_data_launch_update_vmsa vmsa;
603 struct kvm_vcpu *vcpu;
606 if (!sev_es_guest(kvm))
611 kvm_for_each_vcpu(i, vcpu, kvm) {
612 struct vcpu_svm *svm = to_svm(vcpu);
614 /* Perform some pre-encryption checks against the VMSA */
615 ret = sev_es_sync_vmsa(svm);
620 * The LAUNCH_UPDATE_VMSA command will perform in-place
621 * encryption of the VMSA memory content (i.e. it will write
622 * the same memory region with the guest's key), so invalidate it first.
625 clflush_cache_range(svm->vmsa, PAGE_SIZE);
627 vmsa.handle = sev->handle;
628 vmsa.address = __sme_pa(svm->vmsa);
629 vmsa.len = PAGE_SIZE;
630 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
635 svm->vcpu.arch.guest_state_protected = true;
641 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
643 void __user *measure = (void __user *)(uintptr_t)argp->data;
644 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
645 struct sev_data_launch_measure data;
646 struct kvm_sev_launch_measure params;
647 void __user *p = NULL;
654 if (copy_from_user(&params, measure, sizeof(params)))
657 memset(&data, 0, sizeof(data));
659 /* User wants to query the blob length */
663 p = (void __user *)(uintptr_t)params.uaddr;
665 if (params.len > SEV_FW_BLOB_MAX_SIZE)
668 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
672 data.address = __psp_pa(blob);
673 data.len = params.len;
677 data.handle = sev->handle;
678 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
681 * If userspace was only querying the blob length, there is nothing to copy out;
690 if (copy_to_user(p, blob, params.len))
695 params.len = data.len;
696 if (copy_to_user(measure, &params, sizeof(params)))
703 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
705 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
706 struct sev_data_launch_finish data;
711 data.handle = sev->handle;
712 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
715 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
717 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
718 struct kvm_sev_guest_status params;
719 struct sev_data_guest_status data;
725 memset(&data, 0, sizeof(data));
727 data.handle = sev->handle;
728 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
732 params.policy = data.policy;
733 params.state = data.state;
734 params.handle = data.handle;
736 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
742 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
743 unsigned long dst, int size,
744 int *error, bool enc)
746 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
747 struct sev_data_dbg data;
750 data.handle = sev->handle;
755 return sev_issue_cmd(kvm,
756 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
760 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
761 unsigned long dst_paddr, int sz, int *err)
766 * It's safe to read more than was asked for; the caller should ensure that
767 * the destination has enough space.
769 offset = src_paddr & 15;
770 src_paddr = round_down(src_paddr, 16);
771 sz = round_up(sz + offset, 16);
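/*
 * Worked example: for src_paddr = 0x1005 and sz = 16, offset is 5, the
 * source is rounded down to 0x1000 and the length is rounded up to
 * round_up(21, 16) = 32, so the decrypt still covers the whole original range.
 */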
773 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
776 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
777 void __user *dst_uaddr,
778 unsigned long dst_paddr,
781 struct page *tpage = NULL;
784 /* If the inputs are not 16-byte aligned then use an intermediate buffer */
785 if (!IS_ALIGNED(dst_paddr, 16) ||
786 !IS_ALIGNED(paddr, 16) ||
787 !IS_ALIGNED(size, 16)) {
788 tpage = (void *)alloc_page(GFP_KERNEL);
792 dst_paddr = __sme_page_pa(tpage);
795 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
801 if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
812 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
814 unsigned long dst_paddr,
815 void __user *dst_vaddr,
816 int size, int *error)
818 struct page *src_tpage = NULL;
819 struct page *dst_tpage = NULL;
822 /* If source buffer is not aligned then use an intermediate buffer */
823 if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
824 src_tpage = alloc_page(GFP_KERNEL);
828 if (copy_from_user(page_address(src_tpage), vaddr, size)) {
829 __free_page(src_tpage);
833 paddr = __sme_page_pa(src_tpage);
837 * If destination buffer or length is not aligned then do read-modify-write:
838 * - decrypt destination in an intermediate buffer
839 * - copy the source buffer in an intermediate buffer
840 * - use the intermediate buffer as source buffer
842 if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
845 dst_tpage = alloc_page(GFP_KERNEL);
851 ret = __sev_dbg_decrypt(kvm, dst_paddr,
852 __sme_page_pa(dst_tpage), size, error);
857 * If the source is a kernel buffer then use memcpy(), otherwise use copy_from_user().
860 dst_offset = dst_paddr & 15;
863 memcpy(page_address(dst_tpage) + dst_offset,
864 page_address(src_tpage), size);
866 if (copy_from_user(page_address(dst_tpage) + dst_offset,
873 paddr = __sme_page_pa(dst_tpage);
874 dst_paddr = round_down(dst_paddr, 16);
875 len = round_up(size, 16);
878 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
882 __free_page(src_tpage);
884 __free_page(dst_tpage);
888 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
890 unsigned long vaddr, vaddr_end, next_vaddr;
891 unsigned long dst_vaddr;
892 struct page **src_p, **dst_p;
893 struct kvm_sev_dbg debug;
901 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
904 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
906 if (!debug.dst_uaddr)
909 vaddr = debug.src_uaddr;
911 vaddr_end = vaddr + size;
912 dst_vaddr = debug.dst_uaddr;
914 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
915 int len, s_off, d_off;
917 /* lock userspace source and destination page */
918 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
920 return PTR_ERR(src_p);
922 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
924 sev_unpin_memory(kvm, src_p, n);
925 return PTR_ERR(dst_p);
929 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
930 * the pages; flush the destination too so that future accesses do not see stale data.
933 sev_clflush_pages(src_p, 1);
934 sev_clflush_pages(dst_p, 1);
937 * Since the user buffer may not be page-aligned, calculate the
938 * offset within the page.
940 s_off = vaddr & ~PAGE_MASK;
941 d_off = dst_vaddr & ~PAGE_MASK;
942 len = min_t(size_t, (PAGE_SIZE - s_off), size);
945 ret = __sev_dbg_decrypt_user(kvm,
946 __sme_page_pa(src_p[0]) + s_off,
947 (void __user *)dst_vaddr,
948 __sme_page_pa(dst_p[0]) + d_off,
951 ret = __sev_dbg_encrypt_user(kvm,
952 __sme_page_pa(src_p[0]) + s_off,
953 (void __user *)vaddr,
954 __sme_page_pa(dst_p[0]) + d_off,
955 (void __user *)dst_vaddr,
958 sev_unpin_memory(kvm, src_p, n);
959 sev_unpin_memory(kvm, dst_p, n);
964 next_vaddr = vaddr + len;
965 dst_vaddr = dst_vaddr + len;
972 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
974 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
975 struct sev_data_launch_secret data;
976 struct kvm_sev_launch_secret params;
985 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
988 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
990 return PTR_ERR(pages);
993 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
994 * place; the cache may contain the data that was written unencrypted.
996 sev_clflush_pages(pages, n);
999 * The secret must be copied into a contiguous memory region; verify
1000 * that the userspace memory pages are contiguous before issuing the command.
1002 if (get_num_contig_pages(0, pages, n) != n) {
1004 goto e_unpin_memory;
1007 memset(&data, 0, sizeof(data));
1009 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1010 data.guest_address = __sme_page_pa(pages[0]) + offset;
1011 data.guest_len = params.guest_len;
1013 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1015 ret = PTR_ERR(blob);
1016 goto e_unpin_memory;
1019 data.trans_address = __psp_pa(blob);
1020 data.trans_len = params.trans_len;
1022 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1027 data.hdr_address = __psp_pa(hdr);
1028 data.hdr_len = params.hdr_len;
1030 data.handle = sev->handle;
1031 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
1038 /* content of memory is updated, mark pages dirty */
1039 for (i = 0; i < n; i++) {
1040 set_page_dirty_lock(pages[i]);
1041 mark_page_accessed(pages[i]);
1043 sev_unpin_memory(kvm, pages, n);
1047 static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1049 void __user *report = (void __user *)(uintptr_t)argp->data;
1050 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1051 struct sev_data_attestation_report data;
1052 struct kvm_sev_attestation_report params;
1057 if (!sev_guest(kvm))
1060 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1063 memset(&data, 0, sizeof(data));
1065 /* User wants to query the blob length */
1069 p = (void __user *)(uintptr_t)params.uaddr;
1071 if (params.len > SEV_FW_BLOB_MAX_SIZE)
1074 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
1078 data.address = __psp_pa(blob);
1079 data.len = params.len;
1080 memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
1083 data.handle = sev->handle;
1084 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
1086 * If userspace was only querying the blob length, there is nothing to copy out;
1095 if (copy_to_user(p, blob, params.len))
1100 params.len = data.len;
1101 if (copy_to_user(report, &params, sizeof(params)))
1108 /* Userspace wants to query session length. */
1110 __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
1111 struct kvm_sev_send_start *params)
1113 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1114 struct sev_data_send_start data;
1117 memset(&data, 0, sizeof(data));
1118 data.handle = sev->handle;
1119 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1121 params->session_len = data.session_len;
1122 if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1123 sizeof(struct kvm_sev_send_start)))
1129 static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1131 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1132 struct sev_data_send_start data;
1133 struct kvm_sev_send_start params;
1134 void *amd_certs, *session_data;
1135 void *pdh_cert, *plat_certs;
1138 if (!sev_guest(kvm))
1141 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1142 sizeof(struct kvm_sev_send_start)))
1145 /* if session_len is zero, userspace wants to query the session length */
1146 if (!params.session_len)
1147 return __sev_send_start_query_session_length(kvm, argp,
1150 /* some sanity checks */
1151 if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
1152 !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
1155 /* allocate the memory to hold the session data blob */
1156 session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
1160 /* copy the certificate blobs from userspace */
1161 pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
1162 params.pdh_cert_len);
1163 if (IS_ERR(pdh_cert)) {
1164 ret = PTR_ERR(pdh_cert);
1165 goto e_free_session;
1168 plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
1169 params.plat_certs_len);
1170 if (IS_ERR(plat_certs)) {
1171 ret = PTR_ERR(plat_certs);
1175 amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
1176 params.amd_certs_len);
1177 if (IS_ERR(amd_certs)) {
1178 ret = PTR_ERR(amd_certs);
1179 goto e_free_plat_cert;
1182 /* populate the FW SEND_START field with system physical address */
1183 memset(&data, 0, sizeof(data));
1184 data.pdh_cert_address = __psp_pa(pdh_cert);
1185 data.pdh_cert_len = params.pdh_cert_len;
1186 data.plat_certs_address = __psp_pa(plat_certs);
1187 data.plat_certs_len = params.plat_certs_len;
1188 data.amd_certs_address = __psp_pa(amd_certs);
1189 data.amd_certs_len = params.amd_certs_len;
1190 data.session_address = __psp_pa(session_data);
1191 data.session_len = params.session_len;
1192 data.handle = sev->handle;
1194 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1196 if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
1197 session_data, params.session_len)) {
1199 goto e_free_amd_cert;
1202 params.policy = data.policy;
1203 params.session_len = data.session_len;
1204 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
1205 sizeof(struct kvm_sev_send_start)))
1215 kfree(session_data);
1219 /* Userspace wants to query either header or trans length. */
1221 __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1222 struct kvm_sev_send_update_data *params)
1224 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1225 struct sev_data_send_update_data data;
1228 memset(&data, 0, sizeof(data));
1229 data.handle = sev->handle;
1230 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1232 params->hdr_len = data.hdr_len;
1233 params->trans_len = data.trans_len;
1235 if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1236 sizeof(struct kvm_sev_send_update_data)))
1242 static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1244 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1245 struct sev_data_send_update_data data;
1246 struct kvm_sev_send_update_data params;
1247 void *hdr, *trans_data;
1248 struct page **guest_page;
1252 if (!sev_guest(kvm))
1255 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1256 sizeof(struct kvm_sev_send_update_data)))
1259 /* userspace wants to query either header or trans length */
1260 if (!params.trans_len || !params.hdr_len)
1261 return __sev_send_update_data_query_lengths(kvm, argp, &params);
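/*
 * Query convention (also used by SEND_START above): issuing the firmware
 * command with a zeroed buffer description makes the PSP report the required
 * hdr/trans lengths, which are then returned to userspace so it can retry
 * with properly sized buffers. The zero-length command itself presumably
 * fails with a length error; only the reported lengths are consumed here.
 */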
1263 if (!params.trans_uaddr || !params.guest_uaddr ||
1264 !params.guest_len || !params.hdr_uaddr)
1267 /* Check if we are crossing the page boundary */
1268 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1269 if ((params.guest_len + offset > PAGE_SIZE))
1272 /* Pin guest memory */
1273 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1275 if (IS_ERR(guest_page))
1276 return PTR_ERR(guest_page);
1278 /* allocate memory for header and transport buffer */
1280 hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
1284 trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
1288 memset(&data, 0, sizeof(data));
1289 data.hdr_address = __psp_pa(hdr);
1290 data.hdr_len = params.hdr_len;
1291 data.trans_address = __psp_pa(trans_data);
1292 data.trans_len = params.trans_len;
1294 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */
1295 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1296 data.guest_address |= sev_me_mask;
1297 data.guest_len = params.guest_len;
1298 data.handle = sev->handle;
1300 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1303 goto e_free_trans_data;
1305 /* copy transport buffer to user space */
1306 if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
1307 trans_data, params.trans_len)) {
1309 goto e_free_trans_data;
1312 /* Copy packet header to userspace. */
1313 if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
1322 sev_unpin_memory(kvm, guest_page, n);
1327 static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1329 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1330 struct sev_data_send_finish data;
1332 if (!sev_guest(kvm))
1335 data.handle = sev->handle;
1336 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
1339 static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
1341 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1342 struct sev_data_send_cancel data;
1344 if (!sev_guest(kvm))
1347 data.handle = sev->handle;
1348 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
1351 static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1353 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1354 struct sev_data_receive_start start;
1355 struct kvm_sev_receive_start params;
1356 int *error = &argp->error;
1361 if (!sev_guest(kvm))
1364 /* Get parameter from the userspace */
1365 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1366 sizeof(struct kvm_sev_receive_start)))
1369 /* some sanity checks */
1370 if (!params.pdh_uaddr || !params.pdh_len ||
1371 !params.session_uaddr || !params.session_len)
1374 pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1375 if (IS_ERR(pdh_data))
1376 return PTR_ERR(pdh_data);
1378 session_data = psp_copy_user_blob(params.session_uaddr,
1379 params.session_len);
1380 if (IS_ERR(session_data)) {
1381 ret = PTR_ERR(session_data);
1385 memset(&start, 0, sizeof(start));
1386 start.handle = params.handle;
1387 start.policy = params.policy;
1388 start.pdh_cert_address = __psp_pa(pdh_data);
1389 start.pdh_cert_len = params.pdh_len;
1390 start.session_address = __psp_pa(session_data);
1391 start.session_len = params.session_len;
1393 /* create memory encryption context */
1394 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
1397 goto e_free_session;
1399 /* Bind ASID to this guest */
1400 ret = sev_bind_asid(kvm, start.handle, error);
1402 goto e_free_session;
1404 params.handle = start.handle;
1405 if (copy_to_user((void __user *)(uintptr_t)argp->data,
1406 &params, sizeof(struct kvm_sev_receive_start))) {
1408 sev_unbind_asid(kvm, start.handle);
1409 goto e_free_session;
1412 sev->handle = start.handle;
1413 sev->fd = argp->sev_fd;
1416 kfree(session_data);
1423 static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1425 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1426 struct kvm_sev_receive_update_data params;
1427 struct sev_data_receive_update_data data;
1428 void *hdr = NULL, *trans = NULL;
1429 struct page **guest_page;
1433 if (!sev_guest(kvm))
1436 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1437 sizeof(struct kvm_sev_receive_update_data)))
1440 if (!params.hdr_uaddr || !params.hdr_len ||
1441 !params.guest_uaddr || !params.guest_len ||
1442 !params.trans_uaddr || !params.trans_len)
1445 /* Check if we are crossing the page boundary */
1446 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1447 if ((params.guest_len + offset > PAGE_SIZE))
1450 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1452 return PTR_ERR(hdr);
1454 trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1455 if (IS_ERR(trans)) {
1456 ret = PTR_ERR(trans);
1460 memset(&data, 0, sizeof(data));
1461 data.hdr_address = __psp_pa(hdr);
1462 data.hdr_len = params.hdr_len;
1463 data.trans_address = __psp_pa(trans);
1464 data.trans_len = params.trans_len;
1466 /* Pin guest memory */
1467 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1469 if (IS_ERR(guest_page)) {
1470 ret = PTR_ERR(guest_page);
1474 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
1475 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1476 data.guest_address |= sev_me_mask;
1477 data.guest_len = params.guest_len;
1478 data.handle = sev->handle;
1480 ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
1483 sev_unpin_memory(kvm, guest_page, n);
1493 static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1495 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1496 struct sev_data_receive_finish data;
1498 if (!sev_guest(kvm))
1501 data.handle = sev->handle;
1502 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
1505 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1507 struct kvm_sev_cmd sev_cmd;
1516 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1519 mutex_lock(&kvm->lock);
1521 /* enc_context_owner handles all memory enc operations */
1522 if (is_mirroring_enc_context(kvm)) {
1527 switch (sev_cmd.id) {
1528 case KVM_SEV_ES_INIT:
1529 if (!sev_es_enabled) {
1535 r = sev_guest_init(kvm, &sev_cmd);
1537 case KVM_SEV_LAUNCH_START:
1538 r = sev_launch_start(kvm, &sev_cmd);
1540 case KVM_SEV_LAUNCH_UPDATE_DATA:
1541 r = sev_launch_update_data(kvm, &sev_cmd);
1543 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1544 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1546 case KVM_SEV_LAUNCH_MEASURE:
1547 r = sev_launch_measure(kvm, &sev_cmd);
1549 case KVM_SEV_LAUNCH_FINISH:
1550 r = sev_launch_finish(kvm, &sev_cmd);
1552 case KVM_SEV_GUEST_STATUS:
1553 r = sev_guest_status(kvm, &sev_cmd);
1555 case KVM_SEV_DBG_DECRYPT:
1556 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1558 case KVM_SEV_DBG_ENCRYPT:
1559 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1561 case KVM_SEV_LAUNCH_SECRET:
1562 r = sev_launch_secret(kvm, &sev_cmd);
1564 case KVM_SEV_GET_ATTESTATION_REPORT:
1565 r = sev_get_attestation_report(kvm, &sev_cmd);
1567 case KVM_SEV_SEND_START:
1568 r = sev_send_start(kvm, &sev_cmd);
1570 case KVM_SEV_SEND_UPDATE_DATA:
1571 r = sev_send_update_data(kvm, &sev_cmd);
1573 case KVM_SEV_SEND_FINISH:
1574 r = sev_send_finish(kvm, &sev_cmd);
1576 case KVM_SEV_SEND_CANCEL:
1577 r = sev_send_cancel(kvm, &sev_cmd);
1579 case KVM_SEV_RECEIVE_START:
1580 r = sev_receive_start(kvm, &sev_cmd);
1582 case KVM_SEV_RECEIVE_UPDATE_DATA:
1583 r = sev_receive_update_data(kvm, &sev_cmd);
1585 case KVM_SEV_RECEIVE_FINISH:
1586 r = sev_receive_finish(kvm, &sev_cmd);
1593 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1597 mutex_unlock(&kvm->lock);
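/*
 * Hypothetical userspace usage of the dispatcher above (illustrative only,
 * field values are placeholders):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_LAUNCH_START,
 *		.data   = (__u64)(uintptr_t)&launch_params,
 *		.sev_fd = sev_fd,	/* open file descriptor for /dev/sev */
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * On return, cmd.error holds the SEV firmware status code.
 */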
1601 int svm_register_enc_region(struct kvm *kvm,
1602 struct kvm_enc_region *range)
1604 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1605 struct enc_region *region;
1608 if (!sev_guest(kvm))
1611 /* If kvm is mirroring another VM's encryption context, it isn't responsible for it */
1612 if (is_mirroring_enc_context(kvm))
1615 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1618 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1622 mutex_lock(&kvm->lock);
1623 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1624 if (IS_ERR(region->pages)) {
1625 ret = PTR_ERR(region->pages);
1626 mutex_unlock(&kvm->lock);
1630 region->uaddr = range->addr;
1631 region->size = range->size;
1633 list_add_tail(&region->list, &sev->regions_list);
1634 mutex_unlock(&kvm->lock);
1637 * The guest may change the memory encryption attribute from C=0 -> C=1
1638 * or vice versa for this memory range. Make sure the caches are
1639 * flushed so that guest data gets written into memory with the right C-bit.
1642 sev_clflush_pages(region->pages, region->npages);
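/*
 * Why regions are pinned: SEV ties the ciphertext to the physical address of
 * the page, so encrypted guest memory cannot be transparently swapped or
 * migrated by the host; registering a region keeps its pages pinned (and
 * accounted) for the lifetime of the VM.
 */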
1651 static struct enc_region *
1652 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1654 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1655 struct list_head *head = &sev->regions_list;
1656 struct enc_region *i;
1658 list_for_each_entry(i, head, list) {
1659 if (i->uaddr == range->addr &&
1660 i->size == range->size)
1667 static void __unregister_enc_region_locked(struct kvm *kvm,
1668 struct enc_region *region)
1670 sev_unpin_memory(kvm, region->pages, region->npages);
1671 list_del(&region->list);
1675 int svm_unregister_enc_region(struct kvm *kvm,
1676 struct kvm_enc_region *range)
1678 struct enc_region *region;
1681 /* If kvm is mirroring another VM's encryption context, it isn't responsible for it */
1682 if (is_mirroring_enc_context(kvm))
1685 mutex_lock(&kvm->lock);
1687 if (!sev_guest(kvm)) {
1692 region = find_enc_region(kvm, range);
1699 * Ensure that all guest tagged cache entries are flushed before
1700 * releasing the pages back to the system for use. CLFLUSH will
1701 * not do this, so issue a WBINVD.
1703 wbinvd_on_all_cpus();
1705 __unregister_enc_region_locked(kvm, region);
1707 mutex_unlock(&kvm->lock);
1711 mutex_unlock(&kvm->lock);
1715 int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
1717 struct file *source_kvm_file;
1718 struct kvm *source_kvm;
1719 struct kvm_sev_info *mirror_sev;
1723 source_kvm_file = fget(source_fd);
1724 if (!file_is_kvm(source_kvm_file)) {
1729 source_kvm = source_kvm_file->private_data;
1730 mutex_lock(&source_kvm->lock);
1732 if (!sev_guest(source_kvm)) {
1734 goto e_source_unlock;
1737 /* Mirrors of mirrors should work, but let's not get silly */
1738 if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
1740 goto e_source_unlock;
1743 asid = to_kvm_svm(source_kvm)->sev_info.asid;
1746 * The mirror kvm holds an enc_context_owner ref so its asid can't
1747 * disappear until we're done with it
1749 kvm_get_kvm(source_kvm);
1751 fput(source_kvm_file);
1752 mutex_unlock(&source_kvm->lock);
1753 mutex_lock(&kvm->lock);
1755 if (sev_guest(kvm)) {
1757 goto e_mirror_unlock;
1760 /* Set enc_context_owner and copy its encryption context over */
1761 mirror_sev = &to_kvm_svm(kvm)->sev_info;
1762 mirror_sev->enc_context_owner = source_kvm;
1763 mirror_sev->asid = asid;
1764 mirror_sev->active = true;
1766 mutex_unlock(&kvm->lock);
1770 mutex_unlock(&kvm->lock);
1771 kvm_put_kvm(source_kvm);
1774 mutex_unlock(&source_kvm->lock);
1776 if (source_kvm_file)
1777 fput(source_kvm_file);
1781 void sev_vm_destroy(struct kvm *kvm)
1783 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1784 struct list_head *head = &sev->regions_list;
1785 struct list_head *pos, *q;
1787 if (!sev_guest(kvm))
1790 /* If this is a mirror VM, release the enc_context_owner and skip SEV cleanup */
1791 if (is_mirroring_enc_context(kvm)) {
1792 kvm_put_kvm(sev->enc_context_owner);
1796 mutex_lock(&kvm->lock);
1799 * Ensure that all guest tagged cache entries are flushed before
1800 * releasing the pages back to the system for use. CLFLUSH will
1801 * not do this, so issue a WBINVD.
1803 wbinvd_on_all_cpus();
1806 * If userspace was terminated before unregistering the memory regions,
1807 * then unpin all the registered memory.
1809 if (!list_empty(head)) {
1810 list_for_each_safe(pos, q, head) {
1811 __unregister_enc_region_locked(kvm,
1812 list_entry(pos, struct enc_region, list));
1817 mutex_unlock(&kvm->lock);
1819 sev_unbind_asid(kvm, sev->handle);
1823 void __init sev_set_cpu_caps(void)
1826 kvm_cpu_cap_clear(X86_FEATURE_SEV);
1827 if (!sev_es_enabled)
1828 kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
1831 void __init sev_hardware_setup(void)
1833 #ifdef CONFIG_KVM_AMD_SEV
1834 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
1835 bool sev_es_supported = false;
1836 bool sev_supported = false;
1838 if (!sev_enabled || !npt_enabled)
1841 /* Does the CPU support SEV? */
1842 if (!boot_cpu_has(X86_FEATURE_SEV))
1845 /* Retrieve SEV CPUID information */
1846 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1848 /* Set encryption bit location for SEV-ES guests */
1849 sev_enc_bit = ebx & 0x3f;
1851 /* Maximum number of encrypted guests supported simultaneously */
1856 /* Minimum ASID value that should be used for SEV guest */
1858 sev_me_mask = 1UL << (ebx & 0x3f);
1860 /* Initialize SEV ASID bitmaps */
1861 sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1862 if (!sev_asid_bitmap)
1865 sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1866 if (!sev_reclaim_asid_bitmap) {
1867 bitmap_free(sev_asid_bitmap);
1868 sev_asid_bitmap = NULL;
1872 sev_asid_count = max_sev_asid - min_sev_asid + 1;
1873 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
1876 pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
1877 sev_supported = true;
1879 /* SEV-ES support requested? */
1880 if (!sev_es_enabled)
1883 /* Does the CPU support SEV-ES? */
1884 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
1887 /* Has the system been allocated ASIDs for SEV-ES? */
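/*
 * SEV-ES ASIDs must come from [1, min_sev_asid - 1] (see sev_asid_new()),
 * so if the minimum SEV ASID is 1 there is no room left for SEV-ES guests.
 */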
1888 if (min_sev_asid == 1)
1891 sev_es_asid_count = min_sev_asid - 1;
1892 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
1895 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
1896 sev_es_supported = true;
1899 sev_enabled = sev_supported;
1900 sev_es_enabled = sev_es_supported;
1904 void sev_hardware_teardown(void)
1909 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
1910 sev_flush_asids(0, max_sev_asid);
1912 bitmap_free(sev_asid_bitmap);
1913 bitmap_free(sev_reclaim_asid_bitmap);
1915 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
1916 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
1919 int sev_cpu_init(struct svm_cpu_data *sd)
1924 sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
1932 * Pages used by hardware to hold guest encrypted state must be flushed before
1933 * returning them to the system.
1935 static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
1939 * If hardware enforced cache coherency for encrypted mappings of the
1940 * same physical page is supported, nothing to do.
1942 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
1946 * If the VM Page Flush MSR is supported, use it to flush the page
1947 * (using the page virtual address and the guest ASID).
1949 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
1950 struct kvm_sev_info *sev;
1951 unsigned long va_start;
1954 /* Align start and stop to page boundaries. */
1955 va_start = (unsigned long)va;
1956 start = (u64)va_start & PAGE_MASK;
1957 stop = PAGE_ALIGN((u64)va_start + len);
1960 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
1962 while (start < stop) {
1963 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
1972 WARN(1, "Address overflow, using WBINVD\n");
1976 * Hardware should always have one of the above features,
1977 * but if not, use WBINVD and issue a warning.
1979 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
1980 wbinvd_on_all_cpus();
1983 void sev_free_vcpu(struct kvm_vcpu *vcpu)
1985 struct vcpu_svm *svm;
1987 if (!sev_es_guest(vcpu->kvm))
1992 if (vcpu->arch.guest_state_protected)
1993 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
1994 __free_page(virt_to_page(svm->vmsa));
1996 if (svm->ghcb_sa_free)
1997 kfree(svm->ghcb_sa);
2000 static void dump_ghcb(struct vcpu_svm *svm)
2002 struct ghcb *ghcb = svm->ghcb;
2005 /* Re-use the dump_invalid_vmcb module parameter */
2006 if (!dump_invalid_vmcb) {
2007 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2011 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2013 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2014 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2015 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2016 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2017 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2018 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2019 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2020 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2021 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2022 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2025 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2027 struct kvm_vcpu *vcpu = &svm->vcpu;
2028 struct ghcb *ghcb = svm->ghcb;
2031 * The GHCB protocol so far allows for the following data to be returned:
2033 * GPRs RAX, RBX, RCX, RDX
2035 * Copy their values, even if they may not have been written during the
2036 * VM-Exit. It's the guest's responsibility to not consume random data.
2038 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2039 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2040 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2041 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
2044 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2046 struct vmcb_control_area *control = &svm->vmcb->control;
2047 struct kvm_vcpu *vcpu = &svm->vcpu;
2048 struct ghcb *ghcb = svm->ghcb;
2052 * The GHCB protocol so far allows for the following data to be supplied:
2054 * GPRs RAX, RBX, RCX, RDX
2058 * VMMCALL allows the guest to provide extra registers. KVM also
2059 * expects RSI for hypercalls, so include that, too.
2061 * Copy their values to the appropriate location if supplied.
2063 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2065 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
2066 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
2067 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
2068 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
2069 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
2071 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
2073 if (ghcb_xcr0_is_valid(ghcb)) {
2074 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2075 kvm_update_cpuid_runtime(vcpu);
2078 /* Copy the GHCB exit information into the VMCB fields */
2079 exit_code = ghcb_get_sw_exit_code(ghcb);
2080 control->exit_code = lower_32_bits(exit_code);
2081 control->exit_code_hi = upper_32_bits(exit_code);
2082 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2083 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
2085 /* Clear the valid entries fields */
2086 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2089 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2091 struct kvm_vcpu *vcpu;
2097 /* Only GHCB Usage code 0 is supported */
2098 if (ghcb->ghcb_usage)
2102 * Retrieve the exit code now even though it may not be marked valid
2103 * as it could help with debugging.
2105 exit_code = ghcb_get_sw_exit_code(ghcb);
2107 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2108 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2109 !ghcb_sw_exit_info_2_is_valid(ghcb))
2112 switch (ghcb_get_sw_exit_code(ghcb)) {
2113 case SVM_EXIT_READ_DR7:
2115 case SVM_EXIT_WRITE_DR7:
2116 if (!ghcb_rax_is_valid(ghcb))
2119 case SVM_EXIT_RDTSC:
2121 case SVM_EXIT_RDPMC:
2122 if (!ghcb_rcx_is_valid(ghcb))
2125 case SVM_EXIT_CPUID:
2126 if (!ghcb_rax_is_valid(ghcb) ||
2127 !ghcb_rcx_is_valid(ghcb))
2129 if (ghcb_get_rax(ghcb) == 0xd)
2130 if (!ghcb_xcr0_is_valid(ghcb))
2136 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2137 if (!ghcb_sw_scratch_is_valid(ghcb))
2140 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2141 if (!ghcb_rax_is_valid(ghcb))
2146 if (!ghcb_rcx_is_valid(ghcb))
2148 if (ghcb_get_sw_exit_info_1(ghcb)) {
2149 if (!ghcb_rax_is_valid(ghcb) ||
2150 !ghcb_rdx_is_valid(ghcb))
2154 case SVM_EXIT_VMMCALL:
2155 if (!ghcb_rax_is_valid(ghcb) ||
2156 !ghcb_cpl_is_valid(ghcb))
2159 case SVM_EXIT_RDTSCP:
2161 case SVM_EXIT_WBINVD:
2163 case SVM_EXIT_MONITOR:
2164 if (!ghcb_rax_is_valid(ghcb) ||
2165 !ghcb_rcx_is_valid(ghcb) ||
2166 !ghcb_rdx_is_valid(ghcb))
2169 case SVM_EXIT_MWAIT:
2170 if (!ghcb_rax_is_valid(ghcb) ||
2171 !ghcb_rcx_is_valid(ghcb))
2174 case SVM_VMGEXIT_MMIO_READ:
2175 case SVM_VMGEXIT_MMIO_WRITE:
2176 if (!ghcb_sw_scratch_is_valid(ghcb))
2179 case SVM_VMGEXIT_NMI_COMPLETE:
2180 case SVM_VMGEXIT_AP_HLT_LOOP:
2181 case SVM_VMGEXIT_AP_JUMP_TABLE:
2182 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2193 if (ghcb->ghcb_usage) {
2194 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2197 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
2202 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2203 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2204 vcpu->run->internal.ndata = 2;
2205 vcpu->run->internal.data[0] = exit_code;
2206 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2211 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
2216 if (svm->ghcb_sa_free) {
2218 * The scratch area lives outside the GHCB, so there is a
2219 * buffer that, depending on the operation performed, may
2220 * need to be synced, then freed.
2222 if (svm->ghcb_sa_sync) {
2223 kvm_write_guest(svm->vcpu.kvm,
2224 ghcb_get_sw_scratch(svm->ghcb),
2225 svm->ghcb_sa, svm->ghcb_sa_len);
2226 svm->ghcb_sa_sync = false;
2229 kfree(svm->ghcb_sa);
2230 svm->ghcb_sa = NULL;
2231 svm->ghcb_sa_free = false;
2234 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2236 sev_es_sync_to_ghcb(svm);
2238 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2242 void pre_sev_run(struct vcpu_svm *svm, int cpu)
2244 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2245 int asid = sev_get_asid(svm->vcpu.kvm);
2247 /* Assign the ASID allocated to this SEV guest */
2253 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
2254 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
2256 if (sd->sev_vmcbs[asid] == svm->vmcb &&
2257 svm->vcpu.arch.last_vmentry_cpu == cpu)
2260 sd->sev_vmcbs[asid] = svm->vmcb;
2261 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
2262 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
2265 #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
2266 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2268 struct vmcb_control_area *control = &svm->vmcb->control;
2269 struct ghcb *ghcb = svm->ghcb;
2270 u64 ghcb_scratch_beg, ghcb_scratch_end;
2271 u64 scratch_gpa_beg, scratch_gpa_end;
2274 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2275 if (!scratch_gpa_beg) {
2276 pr_err("vmgexit: scratch gpa not provided\n");
2280 scratch_gpa_end = scratch_gpa_beg + len;
2281 if (scratch_gpa_end < scratch_gpa_beg) {
2282 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2283 len, scratch_gpa_beg);
2287 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2288 /* Scratch area begins within GHCB */
2289 ghcb_scratch_beg = control->ghcb_gpa +
2290 offsetof(struct ghcb, shared_buffer);
2291 ghcb_scratch_end = control->ghcb_gpa +
2292 offsetof(struct ghcb, reserved_1);
2295 * If the scratch area begins within the GHCB, it must be
2296 * completely contained in the GHCB shared buffer area.
2298 if (scratch_gpa_beg < ghcb_scratch_beg ||
2299 scratch_gpa_end > ghcb_scratch_end) {
2300 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2301 scratch_gpa_beg, scratch_gpa_end);
2305 scratch_va = (void *)svm->ghcb;
2306 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2309 * The guest memory must be read into a kernel buffer, so limit the size.
2312 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2313 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2314 len, GHCB_SCRATCH_AREA_LIMIT);
2317 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
2321 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2322 /* Unable to copy scratch area from guest */
2323 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2330 * The scratch area is outside the GHCB. The operation will
2331 * dictate whether the buffer needs to be synced before running
2332 * the vCPU next time (i.e. a read was requested so the data
2333 * must be written back to the guest memory).
2335 svm->ghcb_sa_sync = sync;
2336 svm->ghcb_sa_free = true;
2339 svm->ghcb_sa = scratch_va;
2340 svm->ghcb_sa_len = len;
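/*
 * To recap the two cases handled above: if the scratch GPA points inside the
 * GHCB it must fall entirely within the shared_buffer area and the
 * already-mapped GHCB memory is used directly; otherwise a kernel buffer
 * (capped at GHCB_SCRATCH_AREA_LIMIT) is allocated, filled from guest
 * memory, and flagged for write-back/free when the GHCB is unmapped.
 */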
2345 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2348 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2349 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2352 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2354 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2357 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2359 svm->vmcb->control.ghcb_gpa = value;
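/*
 * These helpers implement the GHCB MSR protocol: the low bits of the GHCB
 * MSR carry the request/response code (GHCB_MSR_INFO_MASK) and the remaining
 * bits carry the payload, packed with set_ghcb_msr_bits() at the positions
 * defined by the GHCB specification (e.g. the CPUID result goes in the
 * GHCB_MSR_CPUID_VALUE_POS field).
 */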
2362 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2364 struct vmcb_control_area *control = &svm->vmcb->control;
2365 struct kvm_vcpu *vcpu = &svm->vcpu;
2369 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2371 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2374 switch (ghcb_info) {
2375 case GHCB_MSR_SEV_INFO_REQ:
2376 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2380 case GHCB_MSR_CPUID_REQ: {
2381 u64 cpuid_fn, cpuid_reg, cpuid_value;
2383 cpuid_fn = get_ghcb_msr_bits(svm,
2384 GHCB_MSR_CPUID_FUNC_MASK,
2385 GHCB_MSR_CPUID_FUNC_POS);
2387 /* Initialize the registers needed by the CPUID intercept */
2388 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2389 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2391 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
2397 cpuid_reg = get_ghcb_msr_bits(svm,
2398 GHCB_MSR_CPUID_REG_MASK,
2399 GHCB_MSR_CPUID_REG_POS);
2401 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2402 else if (cpuid_reg == 1)
2403 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2404 else if (cpuid_reg == 2)
2405 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2407 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2409 set_ghcb_msr_bits(svm, cpuid_value,
2410 GHCB_MSR_CPUID_VALUE_MASK,
2411 GHCB_MSR_CPUID_VALUE_POS);
2413 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2418 case GHCB_MSR_TERM_REQ: {
2419 u64 reason_set, reason_code;
2421 reason_set = get_ghcb_msr_bits(svm,
2422 GHCB_MSR_TERM_REASON_SET_MASK,
2423 GHCB_MSR_TERM_REASON_SET_POS);
2424 reason_code = get_ghcb_msr_bits(svm,
2425 GHCB_MSR_TERM_REASON_MASK,
2426 GHCB_MSR_TERM_REASON_POS);
2427 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2428 reason_set, reason_code);
2435 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2436 control->ghcb_gpa, ret);
2441 int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
2443 struct vcpu_svm *svm = to_svm(vcpu);
2444 struct vmcb_control_area *control = &svm->vmcb->control;
2445 u64 ghcb_gpa, exit_code;
2449 /* Validate the GHCB */
2450 ghcb_gpa = control->ghcb_gpa;
2451 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2452 return sev_handle_vmgexit_msr_protocol(svm);
2455 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
2459 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
2460 /* Unable to map GHCB from guest */
2461 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
2466 svm->ghcb = svm->ghcb_map.hva;
2467 ghcb = svm->ghcb_map.hva;
2469 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
2471 exit_code = ghcb_get_sw_exit_code(ghcb);
2473 ret = sev_es_validate_vmgexit(svm);
2477 sev_es_sync_from_ghcb(svm);
2478 ghcb_set_sw_exit_info_1(ghcb, 0);
2479 ghcb_set_sw_exit_info_2(ghcb, 0);
2482 switch (exit_code) {
2483 case SVM_VMGEXIT_MMIO_READ:
2484 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
2487 ret = kvm_sev_es_mmio_read(vcpu,
2488 control->exit_info_1,
2489 control->exit_info_2,
2492 case SVM_VMGEXIT_MMIO_WRITE:
2493 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2496 ret = kvm_sev_es_mmio_write(vcpu,
2497 control->exit_info_1,
2498 control->exit_info_2,
2501 case SVM_VMGEXIT_NMI_COMPLETE:
2502 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
2504 case SVM_VMGEXIT_AP_HLT_LOOP:
2505 ret = kvm_emulate_ap_reset_hold(vcpu);
2507 case SVM_VMGEXIT_AP_JUMP_TABLE: {
2508 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
2510 switch (control->exit_info_1) {
2512 /* Set AP jump table address */
2513 sev->ap_jump_table = control->exit_info_2;
2516 /* Get AP jump table address */
2517 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2520 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2521 control->exit_info_1);
2522 ghcb_set_sw_exit_info_1(ghcb, 1);
2523 ghcb_set_sw_exit_info_2(ghcb,
2525 SVM_EVTINJ_TYPE_EXEPT |
2532 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2534 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2535 control->exit_info_1, control->exit_info_2);
2538 ret = svm_invoke_exit_handler(vcpu, exit_code);
2544 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2546 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2549 return kvm_sev_es_string_io(&svm->vcpu, size, port,
2550 svm->ghcb_sa, svm->ghcb_sa_len, in);
2553 void sev_es_init_vmcb(struct vcpu_svm *svm)
2555 struct kvm_vcpu *vcpu = &svm->vcpu;
2557 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2558 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2561 * An SEV-ES guest requires a VMSA area that is separate from the
2562 * VMCB page. Do not include the encryption mask on the VMSA physical
2563 * address since hardware will access it using the guest key.
2565 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2567 /* Can't intercept CR register access, HV can't modify CR registers */
2568 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2569 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2570 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2571 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2572 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2573 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2575 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2577 /* Track EFER/CR register changes */
2578 svm_set_intercept(svm, TRAP_EFER_WRITE);
2579 svm_set_intercept(svm, TRAP_CR0_WRITE);
2580 svm_set_intercept(svm, TRAP_CR4_WRITE);
2581 svm_set_intercept(svm, TRAP_CR8_WRITE);
2583 /* No support for enable_vmware_backdoor */
2584 clr_exception_intercept(svm, GP_VECTOR);
2586 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2587 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2589 /* Clear intercepts on selected MSRs */
2590 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2591 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2592 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2593 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2594 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2595 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2598 void sev_es_create_vcpu(struct vcpu_svm *svm)
2601 * Set the GHCB MSR value as per the GHCB specification when creating
2602 * a vCPU for an SEV-ES guest.
2604 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2609 void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
2611 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2612 struct vmcb_save_area *hostsa;
2615 * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
2616 * of which one step is to perform a VMLOAD. Since hardware does not
2617 * perform a VMSAVE on VMRUN, the host save area must be updated.
2619 vmsave(__sme_page_pa(sd->save_area));
2621 /* XCR0 is restored on VMEXIT, save the current host value */
2622 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2623 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2625 /* PKRU is restored on VMEXIT, save the current host value */
2626 hostsa->pkru = read_pkru();
2628 /* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2629 hostsa->xss = host_xss;
2632 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2634 struct vcpu_svm *svm = to_svm(vcpu);
2636 /* First SIPI: Use the values as initially set by the VMM */
2637 if (!svm->received_first_sipi) {
2638 svm->received_first_sipi = true;
2643 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2644 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
2650 ghcb_set_sw_exit_info_2(svm->ghcb, 1);