return 0;
}
-static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
- unsigned long nr_pages,
- enum wb_reason reason)
-{
- /* the flusher is dealing with the dirty inodes now. */
- if (writeback_in_progress(sb->s_bdi))
- return 1;
-
- if (down_read_trylock(&sb->s_umount)) {
- writeback_inodes_sb_nr(sb, nr_pages, reason);
- up_read(&sb->s_umount);
- return 1;
- }
-
- return 0;
-}
-
+ void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+ unsigned long nr_pages)
+ {
+ struct super_block *sb = root->fs_info->sb;
+ int started;
+
+ /* If we cannot start writeback, just sync all the delalloc files. */
- started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
++ started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
+ WB_REASON_FS_FREE_SPACE);
+ if (!started) {
+ /*
+ * We needn't worry about the filesystem going from r/w to r/o even
+ * though we don't acquire the ->s_umount mutex, because the
+ * filesystem should guarantee that the delalloc inode list is empty
+ * after the filesystem becomes read-only (all dirty pages have been
+ * written to disk).
+ */
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0);
+ }
+ }
+
/*
* shrink metadata reservation for delalloc
*/
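
For context, the renamed try_to_writeback_inodes_sb_nr() used above is expected to keep the same trylock-and-writeback behaviour as the helper removed at the top of this hunk. Below is a minimal sketch of that pattern; it mirrors the removed writeback_inodes_sb_nr_if_idle_safe() rather than quoting the actual fs/fs-writeback.c implementation, and the _sketch name is only illustrative:

#include <linux/fs.h>
#include <linux/writeback.h>

/*
 * Sketch only: mirrors the removed helper above; the real
 * try_to_writeback_inodes_sb_nr() in fs/fs-writeback.c may differ in
 * details such as its exact return type.
 */
static int try_to_writeback_sketch(struct super_block *sb,
				   unsigned long nr_pages,
				   enum wb_reason reason)
{
	/* The flusher thread is already working on the dirty inodes. */
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	/* Only issue writeback if ->s_umount can be taken without blocking. */
	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr_pages, reason);
	up_read(&sb->s_umount);
	return 1;
}

When this returns 0, btrfs_writeback_inodes_sb_nr() above falls back to btrfs_start_delalloc_inodes() followed by btrfs_wait_ordered_extents().
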
* to redo the trans_no_join checks above
*/
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
- cur_trans = fs_info->running_transaction;
goto loop;
- } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
spin_unlock(&fs_info->trans_lock);
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
return -EROFS;
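
The second hunk reflects the conversion of fs_info->fs_state from a flag mask into an unsigned long bitmap queried with the kernel's bit operations. A hedged illustration of the pattern follows; only the test_bit() check is what the hunk actually introduces, while the wrapper names and the set_bit() call in the error path are assumptions for illustration:

#include <linux/bitops.h>
#include "ctree.h"	/* struct btrfs_fs_info, BTRFS_FS_STATE_ERROR */

/* Hypothetical wrappers illustrating the bitmap-style ->fs_state. */
static inline int btrfs_fs_state_error(struct btrfs_fs_info *fs_info)
{
	/* Replaces the old "fs_state & BTRFS_SUPER_FLAG_ERROR" mask test. */
	return test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}

static inline void btrfs_set_fs_state_error(struct btrfs_fs_info *fs_info)
{
	/* Assumed error-path counterpart: atomically records the error bit. */
	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}
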