KVM: Rename kvm_arch_flush_remote_tlb() to kvm_arch_flush_remote_tlbs()
author    David Matlack <dmatlack@google.com>
          Fri, 11 Aug 2023 04:51:14 +0000 (04:51 +0000)
committer Marc Zyngier <maz@kernel.org>
          Thu, 17 Aug 2023 08:35:14 +0000 (09:35 +0100)
Rename kvm_arch_flush_remote_tlb() and the associated macro
__KVM_HAVE_ARCH_FLUSH_REMOTE_TLB to kvm_arch_flush_remote_tlbs() and
__KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS respectively.

The plural name matches kvm_flush_remote_tlbs() and makes it clearer
that this function can affect more than one remote TLB.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Acked-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230811045127.3308641-2-rananta@google.com
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/mips.c
arch/x86/include/asm/kvm_host.h
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 04cedf9..9b0ad8f 100644
@@ -896,7 +896,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 
-#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-int kvm_arch_flush_remote_tlb(struct kvm *kvm);
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
 
 #endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index aa5583a..4b7bc39 100644
@@ -981,7 +981,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 
 }
 
-int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
        kvm_mips_callbacks->prepare_flush_shadow(kvm);
        return 1;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 28bd383..a2d3cfc 100644
@@ -1794,8 +1794,8 @@ static inline struct kvm *kvm_arch_alloc_vm(void)
 #define __KVM_HAVE_ARCH_VM_FREE
 void kvm_arch_free_vm(struct kvm *kvm);
 
-#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
        if (kvm_x86_ops.flush_remote_tlbs &&
            !static_call(kvm_x86_flush_remote_tlbs)(kvm))
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d3ac77..e3f968b 100644
@@ -1479,8 +1479,8 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
        return -ENOTSUPP;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index dfbaafb..70e5479 100644
@@ -361,7 +361,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
         * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
         * barrier here.
         */
-       if (!kvm_arch_flush_remote_tlb(kvm)
+       if (!kvm_arch_flush_remote_tlbs(kvm)
            || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.generic.remote_tlb_flush;
 }
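
For context (not part of this patch), below is a minimal sketch of how an
architecture opts in to the renamed hook. The stub body is illustrative only,
but the names and the return convention match the definitions in the diff
above: returning 0 means the architecture has flushed all remote TLBs itself,
while a non-zero value makes kvm_flush_remote_tlbs() fall back to the generic
KVM_REQ_TLB_FLUSH request path.

/* In the arch's asm/kvm_host.h, overriding the generic fallback. */
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Illustrative stub: perform the arch-specific remote TLB flush
	 * here and return 0 on success. Returning non-zero (e.g. -ENOTSUPP)
	 * tells kvm_flush_remote_tlbs() to issue KVM_REQ_TLB_FLUSH to all
	 * vCPUs instead.
	 */
	return -ENOTSUPP;
}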