Merge branch 'kvm-guest-sev-migration' into kvm-master
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 455ac48..3548730 100644
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -143,7 +144,7 @@ void __init sme_unmap_bootdata(char *real_mode_data)
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;
 
-       if (!sme_active())
+       if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return;
 
        /* Get the command line address before unmapping the real_mode_data */
@@ -163,7 +164,7 @@ void __init sme_map_bootdata(char *real_mode_data)
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;
 
-       if (!sme_active())
+       if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return;
 
        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@ -193,7 +194,7 @@ void __init sme_early_init(void)
        for (i = 0; i < ARRAY_SIZE(protection_map); i++)
                protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-       if (sev_active())
+       if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                swiotlb_force = SWIOTLB_FORCE;
 }
 
@@ -202,7 +203,7 @@ void __init sev_setup_arch(void)
        phys_addr_t total_mem = memblock_phys_mem_size();
        unsigned long size;
 
-       if (!sev_active())
+       if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;
 
        /*
@@ -409,33 +410,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
        return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-/*
- * SME and SEV are very similar but they are not the same, so there are
- * times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this.  When a
- * distinction isn't needed, the mem_encrypt_active() function can be used.
- *
- * The trampoline code is a good example for this requirement.  Before
- * paging is activated, SME will access all memory as decrypted, but SEV
- * will access all memory as encrypted.  So, when APs are being brought
- * up under SME the trampoline area cannot be encrypted, whereas under SEV
- * the trampoline area must be encrypted.
- */
-bool sev_active(void)
-{
-       return sev_status & MSR_AMD64_SEV_ENABLED;
-}
-
-bool sme_active(void)
-{
-       return sme_me_mask && !sev_active();
-}
-EXPORT_SYMBOL_GPL(sev_active);
-
-/* Needs to be called from non-instrumentable code */
-bool noinstr sev_es_active(void)
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
-       return sev_status & MSR_AMD64_SEV_ES_ENABLED;
+       notify_range_enc_status_changed(vaddr, npages, enc);
 }
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
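The hunk above drops the sme_active(), sev_active() and sev_es_active() helpers in favour of the generic cc_platform_has() interface pulled in from <linux/cc_platform.h> at the top of the file. Below is a minimal sketch of the substitution applied throughout this file; the CC_ATTR_* names are taken from the hunks themselves, while the old_*() wrapper names are hypothetical and exist only to illustrate the mapping:

#include <linux/cc_platform.h>

/* was sme_active(): SME host-side memory encryption */
static inline bool old_sme_active(void)
{
	return cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
}

/* was sev_active(): SEV guest memory encryption */
static inline bool old_sev_active(void)
{
	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}

/* was sev_es_active(): SEV-ES encrypted register state */
static inline bool old_sev_es_active(void)
{
	return cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT);
}

/* was mem_encrypt_active(): any form of memory encryption */
static inline bool old_mem_encrypt_active(void)
{
	return cc_platform_has(CC_ATTR_MEM_ENCRYPT);
}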
@@ -444,7 +421,7 @@ bool force_dma_unencrypted(struct device *dev)
        /*
         * For SEV, all DMA must be to unencrypted addresses.
         */
-       if (sev_active())
+       if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return true;
 
        /*
@@ -452,7 +429,7 @@ bool force_dma_unencrypted(struct device *dev)
         * device does not support DMA to addresses that include the
         * encryption mask.
         */
-       if (sme_active()) {
+       if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
                u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
                                                dev->bus_dma_limit);
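For the SME leg just above, the check compares the device's usable DMA mask against a mask that ends just below the first set bit of sme_me_mask (the C-bit). A standalone sketch of that arithmetic, assuming the C-bit sits at position 47 and a 32-bit-only device; the names and the final comparison only illustrate what force_dma_unencrypted() does and are not kernel API:

#include <stdint.h>
#include <stdio.h>

/* same semantics as the kernel's DMA_BIT_MASK() */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t sme_me_mask  = 1ULL << 47;		/* assumed C-bit position */
	/* __ffs64() returns the first set bit; ctzll is equivalent for a non-zero mask */
	uint64_t dma_enc_mask = DMA_BIT_MASK(__builtin_ctzll(sme_me_mask));
	uint64_t dma_dev_mask = DMA_BIT_MASK(32);	/* a 32-bit-only device */

	/*
	 * A device whose DMA mask cannot cover addresses that carry the
	 * encryption bit must be handed unencrypted (bounced) memory.
	 */
	printf("force unencrypted DMA: %s\n",
	       dma_dev_mask <= dma_enc_mask ? "yes" : "no");
	return 0;
}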
@@ -477,7 +454,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
         * The unused memory range was mapped decrypted, change the encryption
         * attribute from decrypted to encrypted before freeing it.
         */
-       if (mem_encrypt_active()) {
+       if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
                r = set_memory_encrypted(vaddr, npages);
                if (r) {
                        pr_warn("failed to free unused decrypted pages\n");
@@ -493,7 +470,7 @@ static void print_mem_encrypt_feature_info(void)
        pr_info("AMD Memory Encryption Features active:");
 
        /* Secure Memory Encryption */
-       if (sme_active()) {
+       if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                /*
                 * SME is mutually exclusive with any of the SEV
                 * features below.
@@ -503,11 +480,11 @@ static void print_mem_encrypt_feature_info(void)
        }
 
        /* Secure Encrypted Virtualization */
-       if (sev_active())
+       if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                pr_cont(" SEV");
 
        /* Encrypted Register State */
-       if (sev_es_active())
+       if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                pr_cont(" SEV-ES");
 
        pr_cont("\n");
@@ -526,7 +503,8 @@ void __init mem_encrypt_init(void)
         * With SEV, we need to unroll the rep string I/O instructions,
         * but SEV-ES supports them through the #VC handler.
         */
-       if (sev_active() && !sev_es_active())
+       if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+           !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                static_branch_enable(&sev_enable_key);
 
        print_mem_encrypt_feature_info();
@@ -534,6 +512,6 @@ void __init mem_encrypt_init(void)
 
 int arch_has_restricted_virtio_memory_access(void)
 {
-       return sev_active();
+       return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
 }
 EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);