KVM: x86: Use more verbose names for mem encrypt kvm_x86_ops hooks
authorSean Christopherson <seanjc@google.com>
Fri, 28 Jan 2022 00:52:05 +0000 (00:52 +0000)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 10 Feb 2022 18:50:29 +0000 (13:50 -0500)
Use slightly more verbose names for the so called "memory encrypt",
a.k.a. "mem enc", kvm_x86_ops hooks to bridge the gap between the current
super short kvm_x86_ops names and SVM's more verbose, but non-conforming
names.  This is a step toward using kvm-x86-ops.h with KVM_X86_CVM_OP()
to fill svm_x86_ops.

Opportunistically rename mem_enc_op() to mem_enc_ioctl() to better
reflect its true nature, as it really is a full fledged ioctl() of its
own.  Ideally, the hook would be named confidential_vm_ioctl() or so, as
the ioctl() is a gateway to more than just memory encryption, and because
its underlying purpose is to support Confidential VMs, which can be provided
without memory encryption, e.g. if the TCB of the guest includes the host
kernel but not host userspace, or by isolation in hardware without
encrypting memory.  But, diverging from KVM_MEMORY_ENCRYPT_OP even
further is undesirable, and short of creating aliases for all related
ioctl()s, which introduces a different flavor of divergence, KVM is stuck
with the nomenclature.

Defer renaming SVM's functions to a future commit as there are additional
changes needed to make SVM fully conforming and to match reality (looking
at you, svm_vm_copy_asid_from()).

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220128005208.4008533-20-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/x86.c

index bd2b1e1c5c47f9d19efb9c60b85a6b2402e47d34..9e37dc3d886368f57428aaf7f887f89981358d53 100644 (file)
@@ -112,9 +112,9 @@ KVM_X86_OP(smi_allowed)
 KVM_X86_OP(enter_smm)
 KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
-KVM_X86_OP_NULL(mem_enc_op)
-KVM_X86_OP_NULL(mem_enc_reg_region)
-KVM_X86_OP_NULL(mem_enc_unreg_region)
+KVM_X86_OP_NULL(mem_enc_ioctl)
+KVM_X86_OP_NULL(mem_enc_register_region)
+KVM_X86_OP_NULL(mem_enc_unregister_region)
 KVM_X86_OP_NULL(vm_copy_enc_context_from)
 KVM_X86_OP_NULL(vm_move_enc_context_from)
 KVM_X86_OP(get_msr_feature)
index f50b6d5c190163067f7ce57d63f85a8e9b31e4ed..4b7eee9b22199d4bcaa40b4e447f3cbc0814ba9b 100644 (file)
@@ -1476,9 +1476,9 @@ struct kvm_x86_ops {
        int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        void (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
-       int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
-       int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
-       int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+       int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
+       int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+       int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
        int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
        int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
 
index b82eeef89a3ee7f3f9e2a5acbf5a6c2adb29c2c7..7f346ddcae0ad3d3b895774ff110a4920ada4cfc 100644 (file)
@@ -1761,7 +1761,7 @@ out_fput:
        return ret;
 }
 
-int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+int svm_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 {
        struct kvm_sev_cmd sev_cmd;
        int r;
index 141d4229614301128fc8f66186d39229f22eacdd..19b6a60066726ec9ce9b4aaf5ee1285ca45dec50 100644 (file)
@@ -4605,9 +4605,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .leave_smm = svm_leave_smm,
        .enable_smi_window = svm_enable_smi_window,
 
-       .mem_enc_op = svm_mem_enc_op,
-       .mem_enc_reg_region = svm_register_enc_region,
-       .mem_enc_unreg_region = svm_unregister_enc_region,
+       .mem_enc_ioctl = svm_mem_enc_ioctl,
+       .mem_enc_register_region = svm_register_enc_region,
+       .mem_enc_unregister_region = svm_unregister_enc_region,
 
        .vm_copy_enc_context_from = svm_vm_copy_asid_from,
        .vm_move_enc_context_from = svm_vm_migrate_from,
index ceab55fc8325f87ba4fbcdf498e194e1d4aa2ef3..d6a415ab3a8c7e12b072b7b4cbf0ced67c1e11be 100644 (file)
@@ -587,7 +587,7 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
 extern unsigned int max_sev_asid;
 
 void sev_vm_destroy(struct kvm *kvm);
-int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
+int svm_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
 int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range);
 int svm_unregister_enc_region(struct kvm *kvm,
index a1672c5659c433c25db206418d9fa651161d7b76..a356b8a8cacaeab169a2203e10b406db710c5fc8 100644 (file)
@@ -6461,8 +6461,10 @@ set_pit2_out:
                break;
        case KVM_MEMORY_ENCRYPT_OP: {
                r = -ENOTTY;
-               if (kvm_x86_ops.mem_enc_op)
-                       r = static_call(kvm_x86_mem_enc_op)(kvm, argp);
+               if (!kvm_x86_ops.mem_enc_ioctl)
+                       goto out;
+
+               r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
                break;
        }
        case KVM_MEMORY_ENCRYPT_REG_REGION: {
@@ -6473,8 +6475,10 @@ set_pit2_out:
                        goto out;
 
                r = -ENOTTY;
-               if (kvm_x86_ops.mem_enc_reg_region)
-                       r = static_call(kvm_x86_mem_enc_reg_region)(kvm, &region);
+               if (!kvm_x86_ops.mem_enc_register_region)
+                       goto out;
+
+               r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
                break;
        }
        case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
@@ -6485,8 +6489,10 @@ set_pit2_out:
                        goto out;
 
                r = -ENOTTY;
-               if (kvm_x86_ops.mem_enc_unreg_region)
-                       r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, &region);
+               if (!kvm_x86_ops.mem_enc_unregister_region)
+                       goto out;
+
+               r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
                break;
        }
        case KVM_HYPERV_EVENTFD: {