gfs2: Revert 'Fix "truncate in progress" hang'
author: Andreas Gruenbacher <agruenba@redhat.com>
Thu, 2 Jun 2022 20:15:02 +0000 (15:15 -0500)
committer: Andreas Gruenbacher <agruenba@redhat.com>
Wed, 29 Jun 2022 14:54:59 +0000 (16:54 +0200)
Now that interrupted truncates are completed in the context of the
process taking the glock, there is no need for the glock state engine to
delegate that task to gfs2_quotad or for quotad to perform those
truncates anymore.  Get rid of the obsolete associated infrastructure.

Reverts commit 813e0c46c9e2 ("GFS2: Fix "truncate in progress" hang").

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/main.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c

index 41bee3db8c0d0e7b31565aab61c8c4a6495420bb..347c7bc1fae355ae8af55e82b6e6248b6018740a 100644 (file)
@@ -518,8 +518,7 @@ again:
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  * 
- * Returns: 1 if there is a blocked holder at the head of the list, or 2
- *          if a type specific operation is underway.
+ * Returns: 1 if there is a blocked holder at the head of the list
  */
 
 static int do_promote(struct gfs2_glock *gl)
@@ -627,7 +626,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
-       int rv;
 
        spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
@@ -685,6 +683,8 @@ retry:
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
+                       int rv;
+
                        spin_unlock(&gl->gl_lockref.lock);
                        rv = glops->go_xmote_bh(gl);
                        spin_lock(&gl->gl_lockref.lock);
@@ -693,13 +693,10 @@ retry:
                                goto out;
                        }
                }
-               rv = do_promote(gl);
-               if (rv == 2)
-                       goto out_locked;
+               do_promote(gl);
        }
 out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-out_locked:
        spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -856,7 +853,6 @@ __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
        struct gfs2_holder *gh = NULL;
-       int ret;
 
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;
@@ -875,18 +871,14 @@ __acquires(&gl->gl_lockref.lock)
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
-               ret = do_promote(gl);
-               if (ret == 0)
+               if (do_promote(gl) == 0)
                        goto out_unlock;
-               if (ret == 2)
-                       goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
-out:
        return;
 
 out_sched:
@@ -2213,29 +2205,6 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
        glock_hash_walk(dump_glock_func, sdp);
 }
 
-void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
-{
-       struct gfs2_glock *gl = ip->i_gl;
-       int ret;
-
-       ret = gfs2_truncatei_resume(ip);
-       gfs2_glock_assert_withdraw(gl, ret == 0);
-
-       spin_lock(&gl->gl_lockref.lock);
-       clear_bit(GLF_LOCK, &gl->gl_flags);
-       run_queue(gl, 1);
-       wake_up_glock(gl);
-       spin_unlock(&gl->gl_lockref.lock);
-}
-
-void gfs2_wait_truncate(struct gfs2_inode *ip)
-{
-       struct gfs2_glock *gl = ip->i_gl;
-       wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
-
-       wait_event(*wq, !(ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG));
-}
-
 static const char *state2str(unsigned state)
 {
        switch(state) {
index 2796d5414ec9d61c5289b395a490f60492cceab7..5aed8b500cf5ab88015b394d26a147afb1e17dc9 100644 (file)
@@ -274,8 +274,6 @@ extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
 extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
 extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
-extern void gfs2_wait_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
 extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
 extern void gfs2_glock_free(struct gfs2_glock *gl);
index 6bc09661065418d8594c9e8cd81c862332f6bc48..c387f80ca65e8f48da1b8e86f1e8947561f9f8f3 100644 (file)
@@ -488,7 +488,6 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
 static int inode_go_instantiate(struct gfs2_holder *gh)
 {
        struct gfs2_glock *gl = gh->gh_gl;
-       struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;
 
@@ -504,14 +503,8 @@ static int inode_go_instantiate(struct gfs2_holder *gh)
 
        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
-           (gh->gh_state == LM_ST_EXCLUSIVE)) {
-               spin_lock(&sdp->sd_trunc_lock);
-               if (list_empty(&ip->i_trunc_list))
-                       list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
-               spin_unlock(&sdp->sd_trunc_lock);
-               wake_up(&sdp->sd_quota_wait);
-               gfs2_wait_truncate(ip);
-       }
+           (gh->gh_state == LM_ST_EXCLUSIVE))
+               error = gfs2_truncatei_resume(ip);
 
 out:
        return error;
index 8c00fb389ae5e1daf8cce844f5c1bfe3f8bb60ce..9e319c8f9efda86faaf39bd16cb6e24a55b67049 100644 (file)
@@ -396,7 +396,6 @@ struct gfs2_inode {
        atomic_t i_sizehint;  /* hint of the write size */
        struct rw_semaphore i_rw_mutex;
        struct list_head i_ordered;
-       struct list_head i_trunc_list;
        __be64 *i_hash_cache;
        u32 i_entries;
        u32 i_diskflags;
@@ -784,8 +783,6 @@ struct gfs2_sbd {
        struct mutex sd_quota_mutex;
        struct mutex sd_quota_sync_mutex;
        wait_queue_head_t sd_quota_wait;
-       struct list_head sd_trunc_list;
-       spinlock_t sd_trunc_lock;
 
        unsigned int sd_quota_slots;
        unsigned long *sd_quota_bitmap;
index 244187e3e70f7eeb1a91588b1b3f1f86a07b8b66..d94791527dcb42fff29dbd12ce34bf75960300a5 100644 (file)
@@ -38,7 +38,6 @@ static void gfs2_init_inode_once(void *foo)
        inode_init_once(&ip->i_inode);
        atomic_set(&ip->i_sizehint, 0);
        init_rwsem(&ip->i_rw_mutex);
-       INIT_LIST_HEAD(&ip->i_trunc_list);
        INIT_LIST_HEAD(&ip->i_ordered);
        ip->i_qadata = NULL;
        gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
index c9b423c874a32e64cd0b7439a696e958fa66e709..549879929c847c143905d03bfac6bbc1b9564f4b 100644 (file)
@@ -106,8 +106,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
        mutex_init(&sdp->sd_quota_mutex);
        mutex_init(&sdp->sd_quota_sync_mutex);
        init_waitqueue_head(&sdp->sd_quota_wait);
-       INIT_LIST_HEAD(&sdp->sd_trunc_list);
-       spin_lock_init(&sdp->sd_trunc_lock);
        spin_lock_init(&sdp->sd_bitmap_lock);
 
        INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
index 59d727a4ae2cbdc78d8aa42db552e4021f92672e..a6667e8d781f24aa8ac1632b7f5077d47b4343f4 100644 (file)
@@ -1517,25 +1517,6 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
        }
 }
 
-static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
-{
-       struct gfs2_inode *ip;
-
-       while(1) {
-               ip = NULL;
-               spin_lock(&sdp->sd_trunc_lock);
-               if (!list_empty(&sdp->sd_trunc_list)) {
-                       ip = list_first_entry(&sdp->sd_trunc_list,
-                                       struct gfs2_inode, i_trunc_list);
-                       list_del_init(&ip->i_trunc_list);
-               }
-               spin_unlock(&sdp->sd_trunc_lock);
-               if (ip == NULL)
-                       return;
-               gfs2_glock_finish_truncate(ip);
-       }
-}
-
 void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
        if (!sdp->sd_statfs_force_sync) {
                sdp->sd_statfs_force_sync = 1;
@@ -1558,7 +1539,6 @@ int gfs2_quotad(void *data)
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
-       int empty;
 
        while (!kthread_should_stop()) {
 
@@ -1579,19 +1559,13 @@ int gfs2_quotad(void *data)
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);
 
-               /* Check for & recover partially truncated inodes */
-               quotad_check_trunc_list(sdp);
-
                try_to_freeze();
 
 bypass:
                t = min(quotad_timeo, statfs_timeo);
 
                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
-               spin_lock(&sdp->sd_trunc_lock);
-               empty = list_empty(&sdp->sd_trunc_list);
-               spin_unlock(&sdp->sd_trunc_lock);
-               if (empty && !sdp->sd_statfs_force_sync)
+               if (!sdp->sd_statfs_force_sync)
                        t -= schedule_timeout(t);
                else
                        t = 0;