RDMA/odp: Fix missed unlock in non-blocking invalidate_start
author Jason Gunthorpe <jgg@mellanox.com>
Tue, 11 Jun 2019 16:09:51 +0000 (13:09 -0300)
committer Doug Ledford <dledford@redhat.com>
Wed, 19 Jun 2019 02:44:35 +0000 (22:44 -0400)
If invalidate_start returns with EAGAIN, then the umem_rwsem needs to be
unlocked, as no invalidate_end will be called.

Cc: <stable@vger.kernel.org>
Fixes: ca748c39ea3f ("RDMA/umem: Get rid of per_mm->notifier_count")
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/core/umem_odp.c

index 9001cc1..eb9939d 100644
@@ -149,6 +149,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 {
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);
+       int rc;
 
        if (mmu_notifier_range_blockable(range))
                down_read(&per_mm->umem_rwsem);
@@ -165,11 +166,14 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                return 0;
        }
 
-       return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
-                                            range->end,
-                                            invalidate_range_start_trampoline,
-                                            mmu_notifier_range_blockable(range),
-                                            NULL);
+       rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
+                                          range->end,
+                                          invalidate_range_start_trampoline,
+                                          mmu_notifier_range_blockable(range),
+                                          NULL);
+       if (rc)
+               up_read(&per_mm->umem_rwsem);
+       return rc;
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
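
For illustration only, the locking rule this fix enforces can be sketched
outside the kernel tree as follows. This is a minimal sketch, not the driver
source: example_ctx and do_invalidation_work are hypothetical names standing
in for per_mm and the interval-tree walk. The point is that when
->invalidate_range_start() fails, the paired ->invalidate_range_end() never
runs, so the read lock taken at the top must be dropped on the error path.

    #include <linux/rwsem.h>
    #include <linux/errno.h>

    /* Hypothetical simplified context; the real code uses ib_ucontext_per_mm. */
    struct example_ctx {
            struct rw_semaphore rwsem;
    };

    /* Hypothetical stand-in for the per-umem invalidation walk; may return
     * -EAGAIN when called in non-blocking mode. */
    static int do_invalidation_work(struct example_ctx *ctx, bool blockable);

    static int example_invalidate_range_start(struct example_ctx *ctx,
                                              bool blockable)
    {
            int rc;

            /* Take the lock; in the non-blocking case, fail instead of sleeping. */
            if (blockable)
                    down_read(&ctx->rwsem);
            else if (!down_read_trylock(&ctx->rwsem))
                    return -EAGAIN;

            rc = do_invalidation_work(ctx, blockable);
            if (rc)
                    /* No invalidate_range_end() will follow, so unlock here. */
                    up_read(&ctx->rwsem);
            return rc;
    }

In the real function the lock is taken with down_read() or down_read_trylock()
depending on mmu_notifier_range_blockable(), and it is normally released in
ib_umem_notifier_invalidate_range_end(); the added up_read() on the failure
path keeps the semaphore balanced because the end callback is only invoked
after a successful start.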