kvm/vfio: avoid bouncing the mutex when adding and deleting groups
author     Dmitry Torokhov <dmitry.torokhov@gmail.com>
           Fri, 14 Jul 2023 22:45:33 +0000 (15:45 -0700)
committer  Alex Williamson <alex.williamson@redhat.com>
           Thu, 3 Aug 2023 18:01:56 +0000 (12:01 -0600)
Stop taking the kv->lock mutex in kvm_vfio_update_coherency() and instead
call it with this mutex already held: the callers of the function
typically take (and release) the mutex right around the call to
kvm_vfio_update_coherency() anyway, so calling it under the lock avoids
bouncing the lock up and down.

The exception is kvm_vfio_release(), where we do not take the lock; it
runs only when the very last reference to the kvm_device is being
dropped, so there are no concurrency concerns.
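
The resulting locking pattern looks roughly as follows (a minimal sketch
for illustration only; the lockdep_assert_held() annotation is an
assumption and is not part of this patch):

  static void kvm_vfio_update_coherency(struct kvm_device *dev)
  {
          struct kvm_vfio *kv = dev->private;

          /* Assumed invariant: callers (except release) hold kv->lock. */
          lockdep_assert_held(&kv->lock);

          /* ... walk kv->file_list and update coherency state ... */
  }

  static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
  {
          struct kvm_vfio *kv = dev->private;
          int ret = -ENOENT;

          mutex_lock(&kv->lock);
          /* ... find and remove the matching kvm_vfio_file, set ret ... */
          kvm_vfio_update_coherency(dev);  /* kv->lock is still held */
          mutex_unlock(&kv->lock);

          return ret;
  }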

Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20230714224538.404793-2-dmitry.torokhov@gmail.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
virt/kvm/vfio.c

index 59945f6..ca24ce1 100644
@@ -123,8 +123,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
        bool noncoherent = false;
        struct kvm_vfio_file *kvf;
 
-       mutex_lock(&kv->lock);
-
        list_for_each_entry(kvf, &kv->file_list, node) {
                if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
                        noncoherent = true;
@@ -140,8 +138,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
                else
                        kvm_arch_unregister_noncoherent_dma(dev->kvm);
        }
-
-       mutex_unlock(&kv->lock);
 }
 
 static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
@@ -149,7 +145,7 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
        struct kvm_vfio *kv = dev->private;
        struct kvm_vfio_file *kvf;
        struct file *filp;
-       int ret;
+       int ret = 0;
 
        filp = fget(fd);
        if (!filp)
@@ -158,7 +154,7 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
        /* Ensure the FD is a vfio FD. */
        if (!kvm_vfio_file_is_valid(filp)) {
                ret = -EINVAL;
-               goto err_fput;
+               goto out_fput;
        }
 
        mutex_lock(&kv->lock);
@@ -166,30 +162,26 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
        list_for_each_entry(kvf, &kv->file_list, node) {
                if (kvf->file == filp) {
                        ret = -EEXIST;
-                       goto err_unlock;
+                       goto out_unlock;
                }
        }
 
        kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
        if (!kvf) {
                ret = -ENOMEM;
-               goto err_unlock;
+               goto out_unlock;
        }
 
-       kvf->file = filp;
+       kvf->file = get_file(filp);
        list_add_tail(&kvf->node, &kv->file_list);
 
        kvm_arch_start_assignment(dev->kvm);
        kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
-
-       mutex_unlock(&kv->lock);
-
        kvm_vfio_update_coherency(dev);
 
-       return 0;
-err_unlock:
+out_unlock:
        mutex_unlock(&kv->lock);
-err_fput:
+out_fput:
        fput(filp);
        return ret;
 }
@@ -225,12 +217,12 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
                break;
        }
 
+       kvm_vfio_update_coherency(dev);
+
        mutex_unlock(&kv->lock);
 
        fdput(f);
 
-       kvm_vfio_update_coherency(dev);
-
        return ret;
 }