// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>
#define DR7_RESET_VALUE        0x400

/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT		0xffff
#define AP_INIT_DS_LIMIT		0xffff
#define AP_INIT_LDTR_LIMIT		0xffff
#define AP_INIT_GDTR_LIMIT		0xffff
#define AP_INIT_IDTR_LIMIT		0xffff
#define AP_INIT_TR_LIMIT		0xffff
#define AP_INIT_RFLAGS_DEFAULT		0x2
#define AP_INIT_DR6_DEFAULT		0xffff0ff0
#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define AP_INIT_XCR0_DEFAULT		0x1
#define AP_INIT_X87_FTW_DEFAULT		0x5555
#define AP_INIT_X87_FCW_DEFAULT		0x0040
#define AP_INIT_CR0_DEFAULT		0x60000010
#define AP_INIT_MXCSR_DEFAULT		0x1f80
/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
static u64 sev_hv_features __ro_after_init;
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already by the first handler.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};

struct ghcb_state {
	struct ghcb *ghcb;
};

static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
struct sev_config {
	__u64 debug		: 1,

	      /*
	       * A flag used by __set_pages_state() that indicates when the
	       * per-CPU GHCB has been created and registered and thus can be
	       * used by the BSP instead of the early boot GHCB.
	       *
	       * For APs, the per-CPU GHCB is created before they are started
	       * and registered upon startup, so this flag can be used globally
	       * for the BSP and APs.
	       */
	      ghcbs_initialized	: 1,

	      __reserved	: 62;
};

static struct sev_config sev_cfg __read_mostly;
static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}
/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * there.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}
void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active        = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}
static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
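
/*
 * Illustrative sketch (not part of the original file): the two accessors
 * above are the doorbell of the GHCB MSR protocol. A request is encoded
 * into the GHCB MSR, VMGEXIT hands control to the hypervisor, and the
 * response is read back from the same MSR. A CPUID request for leaf fn
 * would look roughly like this (macros from <asm/sev-common.h>):
 *
 *	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
 *	VMGEXIT();
 *	val = sev_es_rd_ghcb_msr();
 *	if (GHCB_RESP_CODE(val) == GHCB_MSR_CPUID_RESP)
 *		eax = val >> 32;
 */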
static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int insn_bytes;

	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
	if (insn_bytes == 0) {
		/* Nothing could be copied */
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	} else if (insn_bytes == -EINVAL) {
		/* Effective RIP could not be calculated */
		ctxt->fi.vector     = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		ctxt->fi.cr2        = 0;
		return ES_EXCEPTION;
	}

	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
		return ES_DECODE_FAILED;

	if (ctxt->insn.immediate.got)
		return ES_OK;
	else
		return ES_DECODE_FAILED;
}

static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res, ret;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (res) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;
	else
		return ES_OK;
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	if (user_mode(ctxt->regs))
		return __vc_decode_user_insn(ctxt);
	else
		return __vc_decode_kern_insn(ctxt);
}
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;

	/*
	 * This function uses __put_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __put_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __put_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_to_user() here because
	 * vc_write_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *target = (u8 __user *)dst;

		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *target = (u16 __user *)dst;

		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *target = (u32 __user *)dst;

		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *target = (u64 __user *)dst;

		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;

	/*
	 * This function uses __get_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __get_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __get_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_from_user() here because
	 * vc_read_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *s = (u8 __user *)src;

		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *s = (u16 __user *)src;

		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *s = (u32 __user *)src;

		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *s = (u64 __user *)src;

		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}
static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}
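
/*
 * Worked example for the address composition above (illustrative numbers):
 * for a 2M mapping, page_level_mask(level) is PMD_MASK, so the offset
 * within the large page is preserved. With pte_pfn() == 0x100000 and
 * va & ~PMD_MASK == 0x1234, the result is
 * pa = (0x100000 << PAGE_SHIFT) | 0x1234 = 0x100001234.
 */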
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}
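
/*
 * Canonical usage pattern for the get/put pair above (a sketch; every
 * runtime GHCB user in this file follows it):
 *
 *	local_irq_save(flags);
 *	ghcb = __sev_get_ghcb(&state);
 *	... fill the GHCB, sev_es_wr_ghcb_msr(__pa(ghcb)), VMGEXIT() ...
 *	__sev_put_ghcb(&state);
 *	local_irq_restore(flags);
 *
 * NMIs need no special handling by the caller: if one arrives between get
 * and put and itself raises a #VC, __sev_get_ghcb() hands it the backup
 * GHCB and __sev_put_ghcb() restores the interrupted contents.
 */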
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}
static u64 __init get_secrets_page(void)
{
	u64 pa_data = boot_params.cc_blob_address;
	struct cc_blob_sev_info info;
	void *map;

	/*
	 * The CC blob contains the address of the secrets page, check if the
	 * blob is present.
	 */
	if (!pa_data)
		return 0;

	map = early_memremap(pa_data, sizeof(info));
	if (!map) {
		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
		return 0;
	}
	memcpy(&info, map, sizeof(info));
	early_memunmap(map, sizeof(info));

	/* smoke-test the secrets page passed */
	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
		return 0;

	return info.secrets_phys;
}
static u64 __init get_snp_jump_table_addr(void)
{
	struct snp_secrets_page_layout *layout;
	void __iomem *mem;
	u64 pa, addr;

	pa = get_secrets_page();
	if (!pa)
		return 0;

	mem = ioremap_encrypted(pa, PAGE_SIZE);
	if (!mem) {
		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
		return 0;
	}

	layout = (__force struct snp_secrets_page_layout *)mem;

	addr = layout->os_area.ap_jump_table_pa;
	iounmap(mem);

	return addr;
}
static u64 __init get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return get_snp_jump_table_addr();

	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}
static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
				  unsigned long npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;
	int ret;

	vaddr = vaddr & PAGE_MASK;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		if (op == SNP_PAGE_STATE_SHARED) {
			/* Page validation must be rescinded before changing to shared */
			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
				goto e_term;
		}

		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
			 "Wrong PSC response code: 0x%x\n",
			 (unsigned int)GHCB_RESP_CODE(val)))
			goto e_term;

		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
			goto e_term;

		if (op == SNP_PAGE_STATE_PRIVATE) {
			/* Page validation must be performed after changing to private */
			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
				goto e_term;
		}

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}
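
/*
 * Ordering reminder, as enforced above (illustrative): a page must have
 * its validation rescinded before it is handed back as shared, and must
 * be private in the RMP before it is validated, i.e.
 *
 *	shared:  pvalidate(vaddr, RMP_PG_SIZE_4K, false), then PSC request
 *	private: PSC request, then pvalidate(vaddr, RMP_PG_SIZE_4K, true)
 *
 * Violating either order leaves the hypervisor-side RMP state and the
 * guest-side validation state in disagreement, so this code terminates
 * the guest rather than continue.
 */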
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}

void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
{
	unsigned long vaddr, npages;

	vaddr = (unsigned long)__va(paddr);
	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (op == SNP_PAGE_STATE_PRIVATE)
		early_snp_set_memory_private(vaddr, paddr, npages);
	else if (op == SNP_PAGE_STATE_SHARED)
		early_snp_set_memory_shared(vaddr, paddr, npages);
	else
		WARN(1, "invalid memory op %d\n", op);
}
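
/*
 * Hypothetical caller (for illustration only; paddr is made up): convert
 * a 3-page blob that was loaded encrypted to private before first use:
 *
 *	snp_prep_memory(paddr, 3 * PAGE_SIZE, SNP_PAGE_STATE_PRIVATE);
 *
 * The PAGE_ALIGN() above means a size that is not a multiple of PAGE_SIZE
 * still converts the whole last page.
 */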
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
				       unsigned long vaddr_end, int op)
{
	struct ghcb_state state;
	bool use_large_entry;
	struct psc_hdr *hdr;
	struct psc_entry *e;
	unsigned long flags;
	unsigned long pfn;
	struct ghcb *ghcb;
	int i;

	hdr = &data->hdr;
	e = data->entries;

	memset(data, 0, sizeof(*data));
	i = 0;

	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
		hdr->end_entry = i;

		if (is_vmalloc_addr((void *)vaddr)) {
			pfn = vmalloc_to_pfn((void *)vaddr);
			use_large_entry = false;
		} else {
			pfn = __pa(vaddr) >> PAGE_SHIFT;
			use_large_entry = true;
		}

		e->gfn = pfn;
		e->operation = op;

		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
		    (vaddr_end - vaddr) >= PMD_SIZE) {
			e->pagesize = RMP_PG_SIZE_2M;
			vaddr += PMD_SIZE;
		} else {
			e->pagesize = RMP_PG_SIZE_4K;
			vaddr += PAGE_SIZE;
		}

		e++;
		i++;
	}

	/* Page validation must be rescinded before changing to shared */
	if (op == SNP_PAGE_STATE_SHARED)
		pvalidate_pages(data);

	local_irq_save(flags);

	if (sev_cfg.ghcbs_initialized)
		ghcb = __sev_get_ghcb(&state);
	else
		ghcb = boot_ghcb;

	/* Invoke the hypervisor to perform the page state changes */
	if (!ghcb || vmgexit_psc(ghcb, data))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

	if (sev_cfg.ghcbs_initialized)
		__sev_put_ghcb(&state);

	local_irq_restore(flags);

	/* Page validation must be performed after changing to private */
	if (op == SNP_PAGE_STATE_PRIVATE)
		pvalidate_pages(data);

	return vaddr;
}
static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
{
	struct snp_psc_desc desc;
	unsigned long vaddr_end;

	/* Use the MSR protocol when a GHCB is not available. */
	if (!boot_ghcb)
		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end)
		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
}
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
}

void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}

void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
	unsigned long vaddr, npages;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	vaddr = (unsigned long)__va(start);
	npages = (end - start) >> PAGE_SHIFT;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}
static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}
#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)

#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
static void *snp_alloc_vmsa_page(void)
{
	struct page *p;

	/*
	 * Allocate VMSA page to work around the SNP erratum where the CPU will
	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
	 * collides with the RMP entry of VMSA page. The recommended workaround
	 * is to not use a large page.
	 *
	 * Allocate an 8k page which is also 8k-aligned.
	 */
	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	split_page(p, 1);

	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
	__free_page(p);

	return page_address(p + 1);
}
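
/*
 * Why returning (p + 1) avoids the erratum (illustrative): alloc_pages()
 * with order 1 returns an 8K block aligned to 8K. If the block starts on
 * a 2M boundary, its first 4K page is 2M-aligned and could collide with a
 * large-page RMP entry; the second 4K page can never be 2M-aligned:
 *
 *	block at 0x200000 -> keep 0x201000 (not 2M-aligned)
 *	block at 0x202000 -> keep 0x203000 (not 2M-aligned)
 */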
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}
static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
{
	struct sev_es_save_area *cur_vmsa, *vmsa;
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u8 sipi_vector;
	int cpu, ret;
	u64 cr4;

	/*
	 * The hypervisor SNP feature support check has happened earlier, just check
	 * the AP_CREATION one here.
	 */
	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
		return -EOPNOTSUPP;

	/*
	 * Verify the desired start IP against the known trampoline start IP
	 * to catch any future new trampolines that may be introduced that
	 * would require a new protected guest entry point.
	 */
	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
		      "Unsupported SNP start_ip: %lx\n", start_ip))
		return -EINVAL;

	/* Override start_ip with known protected guest start IP */
	start_ip = real_mode_header->sev_es_trampoline_start;

	/* Find the logical CPU for the APIC ID */
	for_each_present_cpu(cpu) {
		if (arch_match_cpu_phys_id(cpu, apic_id))
			break;
	}
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	cur_vmsa = per_cpu(sev_vmsa, cpu);

	/*
	 * A new VMSA is created each time because there is no guarantee that
	 * the current VMSA is the kernel's or that the vCPU is not running. If
	 * an attempt was done to use the current VMSA with a running vCPU, a
	 * #VMEXIT of that vCPU would wipe out all of the settings being done
	 * here.
	 */
	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
	if (!vmsa)
		return -ENOMEM;

	/* CR4 should maintain the MCE value */
	cr4 = native_read_cr4() & X86_CR4_MCE;

	/* Set the CS value based on the start_ip converted to a SIPI vector */
	sipi_vector		= (start_ip >> 12);
	vmsa->cs.base		= sipi_vector << 12;
	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
	vmsa->cs.selector	= sipi_vector << 8;

	/* Set the RIP value based on start_ip */
	vmsa->rip		= start_ip & 0xfff;

	/* Set AP INIT defaults as documented in the APM */
	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
	vmsa->es		= vmsa->ds;
	vmsa->fs		= vmsa->ds;
	vmsa->gs		= vmsa->ds;
	vmsa->ss		= vmsa->ds;

	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
	vmsa->tr.attrib		= INIT_TR_ATTRIBS;

	vmsa->cr4		= cr4;
	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
	vmsa->dr7		= DR7_RESET_VALUE;
	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;

	/* SVME must be set. */
	vmsa->efer		= EFER_SVME;

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl		= 0;
	vmsa->sev_features	= sev_status >> 2;

	/* Switch the page over to a VMSA page now that it is initialized */
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("set VMSA page failed (%u)\n", ret);
		free_page((unsigned long)vmsa);

		return -EINVAL;
	}

	/* Issue VMGEXIT AP Creation NAE event */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_rax(ghcb, vmsa->sev_features);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
		pr_err("SNP AP Creation error\n");
		ret = -EINVAL;
	}

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	/* Perform cleanup if there was an error */
	if (ret) {
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(sev_vmsa, cpu) = vmsa;

	return ret;
}
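
/*
 * Worked example of the CS/RIP setup above (illustrative numbers): for
 * start_ip = 0x9c000 the SIPI vector is 0x9c, so the VMSA is built with
 * cs.base = 0x9c000, cs.selector = 0x9c00 and rip = 0x000 - exactly the
 * register state a real INIT/SIPI sequence with vector 0x9c would produce
 * on a non-SNP guest.
 */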
void __init snp_set_wakeup_secondary_cpu(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Always set this override if SNP is enabled. This makes it the
	 * required method to start APs under SNP. If the hypervisor does
	 * not support AP creation, then no APs will be started.
	 */
	apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit);
}
int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;
	phys_addr_t jump_table_pa;
	u64 jump_table_addr;
	u16 __iomem *jump_table;

	jump_table_addr = get_jump_table_addr();

	/* On UP guests there is no jump table so this is not a failure */
	if (!jump_table_addr)
		return 0;

	/* Check if AP Jump Table is page-aligned */
	if (jump_table_addr & ~PAGE_MASK)
		return -EINVAL;

	jump_table_pa = jump_table_addr & PAGE_MASK;

	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);
	writew(startup_cs, &jump_table[1]);

	iounmap(jump_table);

	return 0;
}
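
/*
 * Worked example (illustrative numbers): with trampoline_start = 0x9d000
 * and sev_es_trampoline_start = 0x9d058, the jump table receives
 * startup_cs = 0x9d000 >> 4 = 0x9d00 and startup_ip = 0x58, i.e. the
 * real-mode far pointer 0x9d00:0x0058 through which a woken AP enters the
 * SEV-ES-aware part of the trampoline.
 */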
/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	u64 exit_info_1;

	/* Is it a WRMSR? */
	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

	ghcb_set_rcx(ghcb, regs->cx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);

	if ((ret == ES_OK) && (!exit_info_1)) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}
static void snp_register_per_cpu_ghcb(void)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	snp_register_ghcb_early(__pa(ghcb));
}

void setup_ghcb(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	/* First make sure the hypervisor talks a supported protocol. */
	if (!sev_es_negotiate_protocol())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	/*
	 * Check whether the runtime #VC exception handler is active. It uses
	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
	 *
	 * If SNP is active, register the per-CPU GHCB page so that the runtime
	 * exception handler can use it.
	 */
	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			snp_register_per_cpu_ghcb();

		sev_cfg.ghcbs_initialized = true;

		return;
	}

	/*
	 * Clear the boot_ghcb. The first exception comes in before the bss
	 * section is cleared.
	 */
	memset(&boot_ghcb_page, 0, PAGE_SIZE);

	/* Alright - Make the boot-ghcb public */
	boot_ghcb = &boot_ghcb_page;

	/* SNP guest requires that GHCB GPA must be registered. */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		snp_register_ghcb_early(__pa(&boot_ghcb_page));
}
#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	while (true) {
		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		/* Wakeup signal? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}

	__sev_put_ghcb(&state);
}

/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver a SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If we get here, the VCPU was woken up again. Jump to CPU
	 * startup code to get it back online.
	 */
	soft_restart_cpu();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{
	smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif
static void __init alloc_runtime_data(int cpu)
{
	struct sev_es_runtime_data *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate SEV-ES runtime data");

	per_cpu(runtime_data, cpu) = data;
}

static void __init init_ghcb(int cpu)
{
	struct sev_es_runtime_data *data;
	int err;

	data = per_cpu(runtime_data, cpu);

	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
					 sizeof(data->ghcb_page));
	if (err)
		panic("Can't map GHCBs unencrypted");

	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

	data->ghcb_active = false;
	data->backup_ghcb_active = false;
}
void __init sev_es_init_vc_handling(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	if (!sev_es_check_cpu_features())
		panic("SEV-ES CPU Features missing");

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
		sev_hv_features = get_hv_features();

		if (!(sev_hv_features & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
	}

	/* Initialize per-cpu GHCB pages */
	for_each_possible_cpu(cpu) {
		alloc_runtime_data(cpu);
		init_ghcb(cpu);
	}

	sev_es_setup_play_dead();

	/* Secondary CPUs use the runtime #VC handler */
	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}
static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}
static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{
	u64 exit_code, exit_info_1, exit_info_2;
	unsigned long ghcb_pa = __pa(ghcb);
	enum es_result res;
	phys_addr_t paddr;
	void __user *ref;

	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
	if (ref == (void __user *)-1L)
		return ES_UNSUPPORTED;

	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
	if (res != ES_OK) {
		if (res == ES_EXCEPTION && !read)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return res;
	}

	exit_info_1 = paddr;
	/* Can never be greater than 8 */
	exit_info_2 = bytes;

	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
}
/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	if (ctxt->regs->flags & X86_EFLAGS_DF)
		off = -bytes;
	else
		off =  bytes;

	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}
static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	enum insn_mmio_type mmio;
	unsigned int bytes = 0;
	enum es_result ret;
	u8 sign_byte;
	long *reg_data;

	mmio = insn_decode_mmio(insn, &bytes);
	if (mmio == INSN_MMIO_DECODE_FAILED)
		return ES_DECODE_FAILED;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
		if (!reg_data)
			return ES_DECODE_FAILED;
	}

	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(ghcb->shared_buffer, reg_data, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case INSN_MMIO_WRITE_IMM:
		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case INSN_MMIO_READ:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero-extend for 32-bit operation */
		if (bytes == 4)
			*reg_data = 0;

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero extend based on operand size */
		memset(reg_data, 0, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		if (bytes == 1) {
			u8 *val = (u8 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x80) ? 0xff : 0x00;
		} else {
			u16 *val = (u16 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
		}

		/* Sign extend based on operand size */
		memset(reg_data, sign_byte, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case INSN_MMIO_MOVS:
		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
	default:
		ret = ES_UNSUPPORTED;
		break;
	}

	return ret;
}
static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long val, *reg = vc_insn_get_rm(ctxt);
	enum es_result ret;

	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
		return ES_VMM_ERROR;

	if (!reg)
		return ES_DECODE_FAILED;

	val = *reg;

	/* Upper 32 bits must be written as zeroes */
	if (val >> 32) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	/* Clear out other reserved bits and set bit 10 */
	val = (val & 0xffff23ffL) | BIT(10);

	/* Early non-zero writes to DR7 are not supported */
	if (!data && (val & ~DR7_RESET_VALUE))
		return ES_UNSUPPORTED;

	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
	ghcb_set_rax(ghcb, val);
	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (data)
		data->dr7 = val;

	return ES_OK;
}
static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long *reg = vc_insn_get_rm(ctxt);

	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
		return ES_VMM_ERROR;

	if (!reg)
		return ES_DECODE_FAILED;

	if (data)
		*reg = data->dr7;
	else
		*reg = DR7_RESET_VALUE;

	return ES_OK;
}
static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{
	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rcx(ghcb, ctxt->regs->cx);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;

	return ES_OK;
}
static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Treat it as a NOP and do not leak a physical address to the
	 * hypervisor.
	 */
	return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	/* Treat the same as MONITOR/MONITORX */
	return ES_OK;
}

static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rax(ghcb, ctxt->regs->ax);
	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

	if (x86_platform.hyper.sev_es_hcall_prepare)
		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!ghcb_rax_is_valid(ghcb))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;

	/*
	 * Call sev_es_hcall_finish() after regs->ax is already set.
	 * This allows the hypervisor handler to overwrite it again if
	 * necessary.
	 */
	if (x86_platform.hyper.sev_es_hcall_finish &&
	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
		return ES_VMM_ERROR;

	return ES_OK;
}
static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Calling exc_alignment_check() directly does not work, because it
	 * enables IRQs and the GHCB is active. Forward the exception and call
	 * it later from vc_forward_exception().
	 */
	ctxt->fi.vector = X86_TRAP_AC;
	ctxt->fi.error_code = 0;
	return ES_EXCEPTION;
}
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{
	enum es_result result;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		result = vc_handle_dr7_read(ghcb, ctxt);
		break;
	case SVM_EXIT_WRITE_DR7:
		result = vc_handle_dr7_write(ghcb, ctxt);
		break;
	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
		result = vc_handle_trap_ac(ghcb, ctxt);
		break;
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
		break;
	case SVM_EXIT_RDPMC:
		result = vc_handle_rdpmc(ghcb, ctxt);
		break;
	case SVM_EXIT_INVD:
		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
		result = ES_UNSUPPORTED;
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(ghcb, ctxt);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(ghcb, ctxt);
		break;
	case SVM_EXIT_MSR:
		result = vc_handle_msr(ghcb, ctxt);
		break;
	case SVM_EXIT_VMMCALL:
		result = vc_handle_vmmcall(ghcb, ctxt);
		break;
	case SVM_EXIT_WBINVD:
		result = vc_handle_wbinvd(ghcb, ctxt);
		break;
	case SVM_EXIT_MONITOR:
		result = vc_handle_monitor(ghcb, ctxt);
		break;
	case SVM_EXIT_MWAIT:
		result = vc_handle_mwait(ghcb, ctxt);
		break;
	case SVM_EXIT_NPF:
		result = vc_handle_mmio(ghcb, ctxt);
		break;
	default:
		/*
		 * Unexpected #VC exception
		 */
		result = ES_UNSUPPORTED;
	}

	return result;
}
static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
	long error_code = ctxt->fi.error_code;
	int trapnr = ctxt->fi.vector;

	ctxt->regs->orig_ax = ctxt->fi.error_code;

	switch (trapnr) {
	case X86_TRAP_GP:
		exc_general_protection(ctxt->regs, error_code);
		break;
	case X86_TRAP_UD:
		exc_invalid_op(ctxt->regs);
		break;
	case X86_TRAP_PF:
		write_cr2(ctxt->fi.cr2);
		exc_page_fault(ctxt->regs, error_code);
		break;
	case X86_TRAP_AC:
		exc_alignment_check(ctxt->regs, error_code);
		break;
	default:
		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
		BUG();
	}
}
static __always_inline bool is_vc2_stack(unsigned long sp)
{
	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{
	unsigned long sp, prev_sp;

	sp      = (unsigned long)regs;
	prev_sp = regs->sp;

	/*
	 * If the code was already executing on the VC2 stack when the #VC
	 * happened, let it proceed to the normal handling routine. This way the
	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
	 */
	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
}
static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	enum es_result result;
	struct ghcb *ghcb;
	bool ret = true;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, error_code);

	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, ghcb, error_code);

	__sev_put_ghcb(&state);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_VMM_ERROR:
		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_DECODE_FAILED:
		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		pr_emerg("Unknown result in %s():%d\n", __func__, result);
		/*
		 * Emulating the instruction which caused the #VC exception
		 * failed - can't continue so print debug information
		 */
		BUG();
	}

	return ret;
}
static __always_inline bool vc_is_db(unsigned long error_code)
{
	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
}

/*
 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
 * and will panic when an error happens.
 */
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{
	irqentry_state_t irq_state;

	/*
	 * With the current implementation it is always possible to switch to a
	 * safe stack because #VC exceptions only happen at known places, like
	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
	 * also happen with code instrumentation when the hypervisor intercepts
	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
	 * exceptions currently also only happen in safe places.
	 *
	 * But keep this here in case the noinstr annotations are violated due
	 * to a bug elsewhere.
	 */
	if (unlikely(vc_from_invalid_context(regs))) {
		instrumentation_begin();
		panic("Can't handle #VC exception from unsupported context\n");
		instrumentation_end();
	}

	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		exc_debug(regs);
		return;
	}

	irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/* Show some debug info */
		show_regs(regs);

		/* Ask hypervisor to sev_es_terminate */
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

		/* If that fails and we get here - just panic */
		panic("Returned from Terminate-Request to Hypervisor\n");
	}

	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);
}
/*
 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
 * and will kill the current task with SIGBUS when an error happens.
 */
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		noist_exc_debug(regs);
		return;
	}

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/*
		 * Do not kill the machine if user-space triggered the
		 * exception. Send SIGBUS instead and let user-space deal with
		 * it.
		 */
		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
	}

	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
	unsigned long exit_code = regs->orig_ax;
	struct es_em_ctxt ctxt;
	enum es_result result;

	vc_ghcb_invalidate(boot_ghcb);

	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_VMM_ERROR:
		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_DECODE_FAILED:
		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_EXCEPTION:
		vc_early_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		BUG();
	}

	return true;

fail:
	show_regs(regs);

	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}
bool __init snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	setup_cpuid_table(cc_info);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}
void __init __noreturn snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
static void dump_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i;

	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);

	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
	}
}
/*
 * It is useful from an auditing/testing perspective to provide an easy way
 * for the guest owner to know that the CPUID table has been initialized as
 * expected, but that initialization happens too early in boot to print any
 * sort of indicator, and there's not really any other good place to do it,
 * so do it here.
 */
static int __init report_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return 0;

	pr_info("Using SNP CPUID table, %d entries present.\n",
		cpuid_table->count);

	if (sev_cfg.debug)
		dump_cpuid_table();

	return 0;
}
arch_initcall(report_cpuid_table);
static int __init init_sev_config(char *str)
{
	char *s;

	while ((s = strsep(&str, ","))) {
		if (!strcmp(s, "debug")) {
			sev_cfg.debug = true;
			continue;
		}

		pr_info("SEV command-line option '%s' was not recognized\n", s);
	}

	return 1;
}
__setup("sev=", init_sev_config);
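
/*
 * Example (illustrative): booting with "sev=debug" sets sev_cfg.debug,
 * which makes report_cpuid_table() above dump the full SNP CPUID table.
 * Unknown options, e.g. "sev=debug,frobnicate", are reported and ignored.
 */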
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	rio->exitinfo2 = SEV_RET_NO_FW_CALL;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = -EIO;
		goto e_restore_irq;
	}

	vc_ghcb_invalidate(ghcb);

	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
		ghcb_set_rax(ghcb, input->data_gpa);
		ghcb_set_rbx(ghcb, input->data_npages);
	}

	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
	if (ret)
		goto e_put;

	rio->exitinfo2 = ghcb->save.sw_exit_info_2;
	switch (rio->exitinfo2) {
	case 0:
		break;

	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
		ret = -EAGAIN;
		break;

	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
		/* The number of expected pages is returned in RBX */
		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
			input->data_npages = ghcb_get_rbx(ghcb);
			ret = -ENOSPC;
			break;
		}
		fallthrough;
	default:
		ret = -EIO;
		break;
	}

e_put:
	__sev_put_ghcb(&state);
e_restore_irq:
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(snp_issue_guest_request);
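
/*
 * Usage sketch (illustrative, modeled on the sev-guest driver): callers
 * pass guest-physical addresses of an SNP guest message request/response
 * pair and retry when the hypervisor reports throttling:
 *
 *	struct snp_req_data input = {
 *		.req_gpa  = __pa(req),
 *		.resp_gpa = __pa(resp),
 *	};
 *
 *	do {
 *		ret = snp_issue_guest_request(SVM_VMGEXIT_GUEST_REQUEST,
 *					      &input, &rio);
 *	} while (ret == -EAGAIN);
 */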
static struct platform_device sev_guest_device = {
	.name		= "sev-guest",
	.id		= -1,
};

static int __init snp_init_platform_device(void)
{
	struct sev_guest_platform_data data;
	u64 gpa;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	gpa = get_secrets_page();
	if (!gpa)
		return -ENODEV;

	data.secrets_gpa = gpa;
	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
		return -ENODEV;

	if (platform_device_register(&sev_guest_device))
		return -ENODEV;

	pr_info("SNP guest platform device initialized.\n");
	return 0;
}
device_initcall(snp_init_platform_device);