ceph: wait for async create reply before sending any cap messages
authorJeff Layton <jlayton@kernel.org>
Sat, 5 Feb 2022 13:39:33 +0000 (08:39 -0500)
committerIlya Dryomov <idryomov@gmail.com>
Tue, 1 Mar 2022 17:26:36 +0000 (18:26 +0100)
If we haven't received a reply to an async create request, then we don't
want to send any cap messages to the MDS for that inode yet.

Just have ceph_check_caps and __kick_flushing_caps return without doing
anything, and have ceph_write_inode wait for the reply if we were asked
to wait on the inode writeback.

URL: https://tracker.ceph.com/issues/54107
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
fs/ceph/caps.c

index b472cd0..991c6c8 100644 (file)
@@ -1915,6 +1915,13 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                ceph_get_mds_session(session);
 
        spin_lock(&ci->i_ceph_lock);
+       if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
+               /* Don't send messages until we get async create reply */
+               spin_unlock(&ci->i_ceph_lock);
+               ceph_put_mds_session(session);
+               return;
+       }
+
        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;
 retry:
@@ -2409,6 +2416,9 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
        dout("write_inode %p wait=%d\n", inode, wait);
        ceph_fscache_unpin_writeback(inode, wbc);
        if (wait) {
+               err = ceph_wait_on_async_create(inode);
+               if (err)
+                       return err;
                dirty = try_flush_caps(inode, &flush_tid);
                if (dirty)
                        err = wait_event_interruptible(ci->i_cap_wq,
@@ -2439,6 +2449,10 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
        u64 first_tid = 0;
        u64 last_snap_flush = 0;
 
+       /* Don't do anything until create reply comes in */
+       if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE)
+               return;
+
        ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
 
        list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {