x86/boot/compressed/64: Unmap GHCB page before booting the kernel
author     Joerg Roedel <jroedel@suse.de>
           Mon, 7 Sep 2020 13:15:25 +0000 (15:15 +0200)
committer  Borislav Petkov <bp@suse.de>
           Mon, 7 Sep 2020 17:45:26 +0000 (19:45 +0200)
Force a page-fault on any further access to the GHCB page once such
accesses should no longer happen. This will catch any bug where a #VC
exception is raised even though none is expected anymore.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-25-joro@8bytes.org
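
The mechanism is the generic "revoke the mapping so stray users fault
loudly" pattern. As a rough userspace analogue (not the boot code itself;
the scratch_page buffer and the mmap()/mprotect() calls below are only
stand-ins for the GHCB page handling done in the decompressor), the same
idea looks like this:

/*
 * Userspace sketch of the pattern used by this patch: once a shared
 * page is no longer supposed to be touched, revoke all access to it so
 * any late user faults immediately instead of silently corrupting state.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);

        /* Stand-in for a per-boot communication page such as the GHCB. */
        unsigned char *scratch_page = mmap(NULL, page_size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (scratch_page == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        memset(scratch_page, 0, page_size);     /* normal use while valid */

        /*
         * "Shutdown": drop all access rights. Any further load or store
         * to this page now raises SIGSEGV, making late users easy to spot.
         */
        if (mprotect(scratch_page, page_size, PROT_NONE)) {
                perror("mprotect");
                return EXIT_FAILURE;
        }

        printf("page revoked; a stray access would now fault\n");
        /* scratch_page[0] = 1;  <- would crash with SIGSEGV here */

        munmap(scratch_page, page_size);
        return EXIT_SUCCESS;
}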
arch/x86/boot/compressed/ident_map_64.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/sev-es.c

diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index 05742f6..063a60e 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -298,6 +298,11 @@ int set_page_encrypted(unsigned long address)
        return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
 }
 
+int set_page_non_present(unsigned long address)
+{
+       return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
+}
+
 static void do_pf_error(const char *msg, unsigned long error_code,
                        unsigned long address, unsigned long ip)
 {
@@ -316,8 +321,14 @@ static void do_pf_error(const char *msg, unsigned long error_code,
 
 void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
-       unsigned long address = native_read_cr2() & PMD_MASK;
-       unsigned long end = address + PMD_SIZE;
+       unsigned long address = native_read_cr2();
+       unsigned long end;
+       bool ghcb_fault;
+
+       ghcb_fault = sev_es_check_ghcb_fault(address);
+
+       address   &= PMD_MASK;
+       end        = address + PMD_SIZE;
 
        /*
         * Check for unexpected error codes. Unexpected are:
@@ -327,6 +338,8 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
         */
        if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
                do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
+       else if (ghcb_fault)
+               do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);
 
        /*
         * Error code is sane - now identity map the 2M region around
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 9995c70..c0e0ffe 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -100,6 +100,7 @@ static inline void choose_random_location(unsigned long input,
 #ifdef CONFIG_X86_64
 extern int set_page_decrypted(unsigned long address);
 extern int set_page_encrypted(unsigned long address);
+extern int set_page_non_present(unsigned long address);
 extern unsigned char _pgtable[];
 #endif
 
@@ -117,8 +118,13 @@ void set_sev_encryption_mask(void);
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 void sev_es_shutdown_ghcb(void);
+extern bool sev_es_check_ghcb_fault(unsigned long address);
 #else
 static inline void sev_es_shutdown_ghcb(void) { }
+static inline bool sev_es_check_ghcb_fault(unsigned long address)
+{
+       return false;
+}
 #endif
 
 /* acpi.c */
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
index fa62af7..1e1fab5 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev-es.c
@@ -121,6 +121,20 @@ void sev_es_shutdown_ghcb(void)
         */
        if (set_page_encrypted((unsigned long)&boot_ghcb_page))
                error("Can't map GHCB page encrypted");
+
+       /*
+        * GHCB page is mapped encrypted again and flushed from the cache.
+        * Mark it non-present now to catch bugs when #VC exceptions trigger
+        * after this point.
+        */
+       if (set_page_non_present((unsigned long)&boot_ghcb_page))
+               error("Can't unmap GHCB page");
+}
+
+bool sev_es_check_ghcb_fault(unsigned long address)
+{
+       /* Check whether the fault was on the GHCB page */
+       return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
 }
 
 void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
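
For reference, the check added in sev_es_check_ghcb_fault() is a plain
page-base comparison: mask the faulting address with PAGE_MASK and test
it against the GHCB page. A self-contained sketch of the same check
(PAGE_SHIFT, PAGE_MASK and the ghcb_page value below are local, made-up
stand-ins, not the kernel definitions):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Pretend page-aligned base address of the revoked page. */
static const uintptr_t ghcb_page = 0x7f0000UL;

static bool fault_on_ghcb(uintptr_t fault_address)
{
        /* True when the fault lies anywhere inside the revoked page. */
        return (fault_address & PAGE_MASK) == ghcb_page;
}

int main(void)
{
        assert(fault_on_ghcb(ghcb_page + 0x10));        /* inside the page   */
        assert(!fault_on_ghcb(ghcb_page + PAGE_SIZE));  /* neighbouring page */
        return 0;
}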