Drivers: hv: vmbus: Support TDX guests (use decrypted post_msg_page under a paravisor; key isolation checks off ms_hyperv.paravisor_present)
[platform/kernel/linux-starfive.git] / drivers / hv / hv.c
index d106411..51e5018 100644 (file)
@@ -57,20 +57,37 @@ int hv_post_message(union hv_connection_id connection_id,
 
        local_irq_save(flags);
 
-       aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+       /*
+        * A TDX VM with the paravisor must use the decrypted post_msg_page: see
+        * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
+        * can use the encrypted hyperv_pcpu_input_arg because it copies the
+        * input into the GHCB page, which has been decrypted by the paravisor.
+        */
+       if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
+               aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
+       else
+               aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);
 
-       if (hv_isolation_type_snp())
-               status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
-                               (void *)aligned_msg, NULL,
-                               sizeof(*aligned_msg));
-       else
+       if (ms_hyperv.paravisor_present) {
+               if (hv_isolation_type_tdx())
+                       status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
+                                                 virt_to_phys(aligned_msg), 0);
+               else if (hv_isolation_type_snp())
+                       status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+                                                  aligned_msg, NULL,
+                                                  sizeof(*aligned_msg));
+               else
+                       status = HV_STATUS_INVALID_PARAMETER;
+       } else {
                status = hv_do_hypercall(HVCALL_POST_MESSAGE,
                                aligned_msg, NULL);
+       }
 
        local_irq_restore(flags);
 
@@ -105,11 +122,29 @@ int hv_synic_alloc(void)
                tasklet_init(&hv_cpu->msg_dpc,
                             vmbus_on_msg_dpc, (unsigned long) hv_cpu);
 
+               if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+                       hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (hv_cpu->post_msg_page == NULL) {
+                               pr_err("Unable to allocate post msg page\n");
+                               goto err;
+                       }
+
+                       ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
+                       if (ret) {
+                               pr_err("Failed to decrypt post msg page: %d\n", ret);
+                               /* Just leak the page, as it's unsafe to free the page. */
+                               hv_cpu->post_msg_page = NULL;
+                               goto err;
+                       }
+
+                       memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+               }
+
                /*
                 * Synic message and event pages are allocated by paravisor.
                 * Skip these pages allocation here.
                 */
-               if (!hv_isolation_type_snp() && !hv_root_partition) {
+               if (!ms_hyperv.paravisor_present && !hv_root_partition) {
                        hv_cpu->synic_message_page =
                                (void *)get_zeroed_page(GFP_ATOMIC);
                        if (hv_cpu->synic_message_page == NULL) {
@@ -129,7 +164,7 @@ int hv_synic_alloc(void)
                }
 
                if (!ms_hyperv.paravisor_present &&
-                   (hv_isolation_type_en_snp() || hv_isolation_type_tdx())) {
+                   (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
                        ret = set_memory_decrypted((unsigned long)
                                hv_cpu->synic_message_page, 1);
                        if (ret) {
@@ -178,8 +213,19 @@ void hv_synic_free(void)
                        = per_cpu_ptr(hv_context.cpu_context, cpu);
 
                /* It's better to leak the page if the encryption fails. */
+               if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+                       if (hv_cpu->post_msg_page) {
+                               ret = set_memory_encrypted((unsigned long)
+                                       hv_cpu->post_msg_page, 1);
+                               if (ret) {
+                                       pr_err("Failed to encrypt post msg page: %d\n", ret);
+                                       hv_cpu->post_msg_page = NULL;
+                               }
+                       }
+               }
+
                if (!ms_hyperv.paravisor_present &&
-                   (hv_isolation_type_en_snp() || hv_isolation_type_tdx())) {
+                   (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
                        if (hv_cpu->synic_message_page) {
                                ret = set_memory_encrypted((unsigned long)
                                        hv_cpu->synic_message_page, 1);
@@ -199,6 +245,7 @@ void hv_synic_free(void)
                        }
                }
 
+               free_page((unsigned long)hv_cpu->post_msg_page);
                free_page((unsigned long)hv_cpu->synic_event_page);
                free_page((unsigned long)hv_cpu->synic_message_page);
        }
@@ -226,7 +273,7 @@ void hv_synic_enable_regs(unsigned int cpu)
        simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
        simp.simp_enabled = 1;
 
-       if (hv_isolation_type_snp() || hv_root_partition) {
+       if (ms_hyperv.paravisor_present || hv_root_partition) {
                /* Mask out vTOM bit. ioremap_cache() maps decrypted */
                u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
                                ~ms_hyperv.shared_gpa_boundary;
@@ -245,7 +292,7 @@ void hv_synic_enable_regs(unsigned int cpu)
        siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 1;
 
-       if (hv_isolation_type_snp() || hv_root_partition) {
+       if (ms_hyperv.paravisor_present || hv_root_partition) {
                /* Mask out vTOM bit. ioremap_cache() maps decrypted */
                u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
                                ~ms_hyperv.shared_gpa_boundary;
@@ -328,7 +375,7 @@ void hv_synic_disable_regs(unsigned int cpu)
         * addresses.
         */
        simp.simp_enabled = 0;
-       if (hv_isolation_type_snp() || hv_root_partition) {
+       if (ms_hyperv.paravisor_present || hv_root_partition) {
                iounmap(hv_cpu->synic_message_page);
                hv_cpu->synic_message_page = NULL;
        } else {
@@ -340,7 +387,7 @@ void hv_synic_disable_regs(unsigned int cpu)
        siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 0;
 
-       if (hv_isolation_type_snp() || hv_root_partition) {
+       if (ms_hyperv.paravisor_present || hv_root_partition) {
                iounmap(hv_cpu->synic_event_page);
                hv_cpu->synic_event_page = NULL;
        } else {