ceph: flush the mdlog for filesystem sync
authorXiubo Li <xiubli@redhat.com>
Tue, 19 Apr 2022 00:58:49 +0000 (08:58 +0800)
committerIlya Dryomov <idryomov@gmail.com>
Wed, 25 May 2022 18:45:14 +0000 (20:45 +0200)
Before waiting for a request's safe reply, we will send the mdlog flush
request to the relevant MDS. This will also flush the mdlog for all
the other unsafe requests in the same session, so we can record the last
session and avoid flushing the mdlog again in the next loop iteration. There
are still cases where the mdlog flush request may be sent twice or more,
but that should not happen often.

Rename wait_unsafe_requests() to
flush_mdlog_and_wait_mdsc_unsafe_requests() to make it more
descriptive.

[xiubli: fold in MDS request refcount leak fix from Jeff]

URL: https://tracker.ceph.com/issues/55284
URL: https://tracker.ceph.com/issues/55411
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
fs/ceph/mds_client.c

index b513938..23a6c11 100644 (file)
@@ -4740,15 +4740,17 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
 }
 
 /*
- * wait for all write mds requests to flush.
+ * flush the mdlog and wait for all write mds requests to flush.
  */
-static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
+static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
+                                                u64 want_tid)
 {
        struct ceph_mds_request *req = NULL, *nextreq;
+       struct ceph_mds_session *last_session = NULL;
        struct rb_node *n;
 
        mutex_lock(&mdsc->mutex);
-       dout("wait_unsafe_requests want %lld\n", want_tid);
+       dout("%s want %lld\n", __func__, want_tid);
 restart:
        req = __get_oldest_req(mdsc);
        while (req && req->r_tid <= want_tid) {
@@ -4760,14 +4762,32 @@ restart:
                        nextreq = NULL;
                if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
                    (req->r_op & CEPH_MDS_OP_WRITE)) {
+                       struct ceph_mds_session *s = req->r_session;
+
+                       if (!s) {
+                               req = nextreq;
+                               continue;
+                       }
+
                        /* write op */
                        ceph_mdsc_get_request(req);
                        if (nextreq)
                                ceph_mdsc_get_request(nextreq);
+                       s = ceph_get_mds_session(s);
                        mutex_unlock(&mdsc->mutex);
-                       dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
+
+                       /* send flush mdlog request to MDS */
+                       if (last_session != s) {
+                               send_flush_mdlog(s);
+                               ceph_put_mds_session(last_session);
+                               last_session = s;
+                       } else {
+                               ceph_put_mds_session(s);
+                       }
+                       dout("%s wait on %llu (want %llu)\n", __func__,
                             req->r_tid, want_tid);
                        wait_for_completion(&req->r_safe_completion);
+
                        mutex_lock(&mdsc->mutex);
                        ceph_mdsc_put_request(req);
                        if (!nextreq)
@@ -4782,7 +4802,8 @@ restart:
                req = nextreq;
        }
        mutex_unlock(&mdsc->mutex);
-       dout("wait_unsafe_requests done\n");
+       ceph_put_mds_session(last_session);
+       dout("%s done\n", __func__);
 }
 
 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
@@ -4811,7 +4832,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        dout("sync want tid %lld flush_seq %lld\n",
             want_tid, want_flush);
 
-       wait_unsafe_requests(mdsc, want_tid);
+       flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
        wait_caps_flush(mdsc, want_flush);
 }