reiserfs: Make cancel_old_flush() reliable
author Jan Kara <jack@suse.cz>
Wed, 5 Apr 2017 12:09:48 +0000 (14:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 22 Mar 2018 08:17:47 +0000 (09:17 +0100)
[ Upstream commit 71b0576bdb862e964a82c73327cdd1a249c53e67 ]

Currently, canceling the delayed work that flushes old data via
cancel_old_flush() does not prevent the work from being requeued. Thus
in theory new work can be queued after cancel_old_flush() from
reiserfs_freeze() has run. This will become a larger problem once
flush_old_commits() can requeue the work itself.
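
For reference, the pre-patch shape of the helper (reconstructed from
the lines removed from super.c below) cancels first and resets the
flag afterwards, so nothing stops a requeue once
cancel_delayed_work_sync() has returned:

static void cancel_old_flush(struct super_block *s)
{
	struct reiserfs_sb_info *sbi = REISERFS_SB(s);

	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
	spin_lock(&sbi->old_work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->old_work_lock);
}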

Fix the problem by recording in sbi->work_queued that the flushing
work is canceled and should not be requeued.
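
In effect sbi->work_queued becomes a small state machine: 0 = no work
queued, 1 = work queued, 2 = canceled, do not queue. A minimal
standalone sketch of the same pattern, using hypothetical demo_* names
rather than the reiserfs ones:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_state;	/* 0 = idle, 1 = queued, 2 = canceled */

static void demo_schedule(void);

static void demo_fn(struct work_struct *work)
{
	/* ... do the periodic flush work here ... */

	spin_lock(&demo_lock);
	/* Avoid clobbering a concurrent cancel: only go 1 -> 0 */
	if (demo_state == 1)
		demo_state = 0;
	spin_unlock(&demo_lock);

	/* Self-requeue; a no-op once demo_cancel() has run */
	demo_schedule();
}

static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static void demo_schedule(void)
{
	spin_lock(&demo_lock);
	/* Queue only from the idle state, never after a cancel */
	if (demo_state == 0) {
		demo_state = 1;
		queue_delayed_work(system_wq, &demo_work, 5 * HZ);
	}
	spin_unlock(&demo_lock);
}

static void demo_cancel(void)
{
	spin_lock(&demo_lock);
	demo_state = 2;		/* block any further queueing first */
	spin_unlock(&demo_lock);
	/* ... then wait out a possibly running instance */
	cancel_delayed_work_sync(&demo_work);
}

Setting the canceled state before cancel_delayed_work_sync() is what
closes the window: any requeue attempt racing with the cancel now sees
state 2 and backs off.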

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/reiserfs/journal.c
fs/reiserfs/reiserfs.h
fs/reiserfs/super.c

diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index bc2dde2..7610818 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1959,7 +1959,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
         * will be requeued because superblock is being shutdown and doesn't
         * have MS_ACTIVE set.
         */
-       cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
+       reiserfs_cancel_old_flush(sb);
        /* wait for all commits to finish */
        cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
 
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 5dcf3ab..6ca0047 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
                                   struct reiserfs_list_bitmap *, unsigned int);
 
 void reiserfs_schedule_old_flush(struct super_block *s);
+void reiserfs_cancel_old_flush(struct super_block *s);
 void add_save_link(struct reiserfs_transaction_handle *th,
                   struct inode *inode, int truncate);
 int remove_save_link(struct inode *inode, int truncate);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index e101d70..dec6c93 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work)
        s = sbi->s_journal->j_work_sb;
 
        spin_lock(&sbi->old_work_lock);
-       sbi->work_queued = 0;
+       /* Avoid clobbering the cancel state... */
+       if (sbi->work_queued == 1)
+               sbi->work_queued = 0;
        spin_unlock(&sbi->old_work_lock);
 
        reiserfs_sync_fs(s, 1);
@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
        spin_unlock(&sbi->old_work_lock);
 }
 
-static void cancel_old_flush(struct super_block *s)
+void reiserfs_cancel_old_flush(struct super_block *s)
 {
        struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 
-       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
        spin_lock(&sbi->old_work_lock);
-       sbi->work_queued = 0;
+       /* Make sure no new flushes will be queued */
+       sbi->work_queued = 2;
        spin_unlock(&sbi->old_work_lock);
+       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 }
 
 static int reiserfs_freeze(struct super_block *s)
 {
        struct reiserfs_transaction_handle th;
 
-       cancel_old_flush(s);
+       reiserfs_cancel_old_flush(s);
 
        reiserfs_write_lock(s);
        if (!(s->s_flags & MS_RDONLY)) {
@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
 
 static int reiserfs_unfreeze(struct super_block *s)
 {
+       struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
        reiserfs_allow_writes(s);
+       spin_lock(&sbi->old_work_lock);
+       /* Allow old_work to run again */
+       sbi->work_queued = 0;
+       spin_unlock(&sbi->old_work_lock);
        return 0;
 }
 
@@ -2194,7 +2203,7 @@ error_unlocked:
        if (sbi->commit_wq)
                destroy_workqueue(sbi->commit_wq);
 
-       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+       reiserfs_cancel_old_flush(s);
 
        reiserfs_free_bitmap_cache(s);
        if (SB_BUFFER_WITH_SB(s))