rbd: take header_rwsem in rbd_dev_refresh() only when updating
author    Ilya Dryomov <idryomov@gmail.com>
          Wed, 20 Sep 2023 17:01:03 +0000 (19:01 +0200)
committer Ilya Dryomov <idryomov@gmail.com>
          Tue, 26 Sep 2023 08:33:19 +0000 (10:33 +0200)
rbd_dev_refresh() has been holding header_rwsem across header and
parent info read-in unnecessarily for ages.  With commit 870611e4877e
("rbd: get snapshot context after exclusive lock is ensured to be
held"), the potential for deadlocks became much more real owning to
a) header_rwsem now nesting inside lock_rwsem and b) rw_semaphores
not allowing new readers after a writer is registered.
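
To illustrate b): with kernel rw_semaphores, once a writer is queued,
new readers block even though only readers currently hold the lock.
A rough sketch of that property (illustrative only, not part of this
patch):

    DECLARE_RWSEM(sem);

    /* thread A */  down_read(&sem);   /* granted */
    /* thread B */  down_write(&sem);  /* blocks behind A */
    /* thread C */  down_read(&sem);   /* also blocks: new readers
                                          queue behind the waiting
                                          writer */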

For example, assuming that I/O request 1, I/O request 2 and the
header read-in request all target the same OSD (the dependency cycle
is summarized after the list):

1. I/O request 1 comes in and gets submitted
2. watch error occurs
3. rbd_watch_errcb() takes lock_rwsem for write, clears owner_cid and
   releases lock_rwsem
4. after reestablishing the watch, rbd_reregister_watch() calls
   rbd_dev_refresh() which takes header_rwsem for write and submits
   a header read-in request
5. I/O request 2 comes in: after taking lock_rwsem for read in
   __rbd_img_handle_request(), it blocks trying to take header_rwsem
   for read in rbd_img_object_requests()
6. another watch error occurs
7. rbd_watch_errcb() blocks trying to take lock_rwsem for write
8. I/O request 1 completion is received by the messenger but can't be
   processed because lock_rwsem won't be granted anymore
9. header read-in request completion can't be received, let alone
   processed, because the messenger is stranded
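
Put differently, the dependency cycle is roughly:

  header read-in completion (step 9)
    -> needs the messenger, which is stuck on I/O request 1 completion
       (step 8)
    -> which needs lock_rwsem for read, queued behind the waiting
       writer (step 7)
    -> which waits for I/O request 2 to release its read hold on
       lock_rwsem (step 5)
    -> which waits for header_rwsem, held for write by
       rbd_dev_refresh() (step 4)
    -> which waits for the header read-in to complete (step 9)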

Change rbd_dev_refresh() to take header_rwsem only for actually
updating rbd_dev->header.  Header and parent info are read into local
structures, so the read-in itself doesn't need any locking.

Cc: stable@vger.kernel.org # 0b035401c570: rbd: move rbd_dev_refresh() definition
Cc: stable@vger.kernel.org # 510a7330c82a: rbd: decouple header read-in from updating rbd_dev->header
Cc: stable@vger.kernel.org # c10311776f0a: rbd: decouple parent info read-in from updating rbd_dev
Cc: stable@vger.kernel.org
Fixes: 870611e4877e ("rbd: get snapshot context after exclusive lock is ensured to be held")
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
drivers/block/rbd.c

index d62a029..a999b69 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -6986,7 +6986,14 @@ static void rbd_dev_update_header(struct rbd_device *rbd_dev,
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
 
-       rbd_dev->header.image_size = header->image_size;
+       if (rbd_dev->header.image_size != header->image_size) {
+               rbd_dev->header.image_size = header->image_size;
+
+               if (!rbd_is_snap(rbd_dev)) {
+                       rbd_dev->mapping.size = header->image_size;
+                       rbd_dev_update_size(rbd_dev);
+               }
+       }
 
        ceph_put_snap_context(rbd_dev->header.snapc);
        rbd_dev->header.snapc = header->snapc;
@@ -7044,11 +7051,9 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header header = { 0 };
        struct parent_image_info pii = { 0 };
-       u64 mapping_size;
        int ret;
 
-       down_write(&rbd_dev->header_rwsem);
-       mapping_size = rbd_dev->mapping.size;
+       dout("%s rbd_dev %p\n", __func__, rbd_dev);
 
        ret = rbd_dev_header_info(rbd_dev, &header, false);
        if (ret)
@@ -7064,18 +7069,13 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
                        goto out;
        }
 
+       down_write(&rbd_dev->header_rwsem);
        rbd_dev_update_header(rbd_dev, &header);
        if (rbd_dev->parent)
                rbd_dev_update_parent(rbd_dev, &pii);
-
-       rbd_assert(!rbd_is_snap(rbd_dev));
-       rbd_dev->mapping.size = rbd_dev->header.image_size;
-
-out:
        up_write(&rbd_dev->header_rwsem);
-       if (!ret && mapping_size != rbd_dev->mapping.size)
-               rbd_dev_update_size(rbd_dev);
 
+out:
        rbd_parent_info_cleanup(&pii);
        rbd_image_header_cleanup(&header);
        return ret;