free_extent_buffer(tmp);
goto again;
} else {
+ if (tmp)
+ free_extent_buffer(tmp);
b = read_node_slot(root, b, slot);
}
}
free_extent_buffer(c);
path->nodes[level] = next;
path->slots[level] = 0;
- path->locks[level] = 1;
+ if (!path->skip_locking)
+ path->locks[level] = 1;
if (!level)
break;
if (level == 1 && path->locks[1] && path->reada)
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
-#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <asm/kmap_types.h>
struct backing_dev_info bdi;
spinlock_t hash_lock;
struct mutex trans_mutex;
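+ /* held by the background kthreads while they run; lockfs takes both to pause them */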
+ struct mutex transaction_kthread_mutex;
+ struct mutex cleaner_mutex;
struct mutex alloc_mutex;
struct mutex chunk_mutex;
struct mutex drop_mutex;
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
- struct list_head end_io_work_list;
- struct work_struct end_io_work;
- spinlock_t end_io_work_lock;
atomic_t nr_async_submits;
/*
struct btrfs_workers workers;
struct btrfs_workers endio_workers;
struct btrfs_workers submit_workers;
+ struct task_struct *transaction_kthread;
+ struct task_struct *cleaner_kthread;
int thread_pool_size;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- struct work_struct trans_work;
-#else
- struct delayed_work trans_work;
-#endif
struct kobject super_kobj;
struct completion kobj_unregister;
int do_barriers;
* Boston, MA 021110-1307, USA.
*/
+#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> // for block_sync_page
#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+# include <linux/freezer.h>
+#else
+# include <linux/sched.h>
+#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#endif
}
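+/*
+ * The cleaner kthread sleeps until the transaction kthread wakes it after
+ * a commit, then drops old, deleted snapshots under cleaner_mutex.
+ */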
+static int cleaner_kthread(void *arg)
+{
+ struct btrfs_root *root = arg;
+
+ do {
+ smp_mb();
+ if (root->fs_info->closing)
+ break;
+
+ vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+ mutex_lock(&root->fs_info->cleaner_mutex);
+printk("cleaner awake\n");
+ btrfs_clean_old_snapshots(root);
+printk("cleaner done\n");
+ mutex_unlock(&root->fs_info->cleaner_mutex);
+
+ if (freezing(current)) {
+ refrigerator();
+ } else {
+ smp_mb();
+ if (root->fs_info->closing)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+ } while (!kthread_should_stop());
+ return 0;
+}
+
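+/*
+ * The transaction kthread commits the running transaction once it is
+ * about 30 seconds old, wakes the cleaner kthread, and goes back to sleep.
+ */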
+static int transaction_kthread(void *arg)
+{
+ struct btrfs_root *root = arg;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_transaction *cur;
+ unsigned long now;
+ unsigned long delay;
+ int ret;
+
+ do {
+ smp_mb();
+ if (root->fs_info->closing)
+ break;
+
+ delay = HZ * 30;
+ vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+ mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+ mutex_lock(&root->fs_info->trans_mutex);
+ cur = root->fs_info->running_transaction;
+ if (!cur) {
+ mutex_unlock(&root->fs_info->trans_mutex);
+ goto sleep;
+ }
+ now = get_seconds();
+ if (now < cur->start_time || now - cur->start_time < 30) {
+ mutex_unlock(&root->fs_info->trans_mutex);
+ delay = HZ * 5;
+ goto sleep;
+ }
+ mutex_unlock(&root->fs_info->trans_mutex);
+ btrfs_defrag_dirty_roots(root->fs_info);
+ trans = btrfs_start_transaction(root, 1);
+ ret = btrfs_commit_transaction(trans, root);
+sleep:
+ wake_up_process(root->fs_info->cleaner_kthread);
+ mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+
+ if (freezing(current)) {
+ refrigerator();
+ } else {
+ if (root->fs_info->closing)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay);
+ __set_current_state(TASK_RUNNING);
+ }
+ } while (!kthread_should_stop());
+ return 0;
+}
+
struct btrfs_root *open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options)
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->do_barriers = 1;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
-#else
- INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
-#endif
BTRFS_I(fs_info->btree_inode)->root = tree_root;
memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
sizeof(struct btrfs_key));
mutex_init(&fs_info->drop_mutex);
mutex_init(&fs_info->alloc_mutex);
mutex_init(&fs_info->chunk_mutex);
+ mutex_init(&fs_info->transaction_kthread_mutex);
+ mutex_init(&fs_info->cleaner_mutex);
#if 0
ret = add_hasher(fs_info, "crc32c");
btrfs_start_workers(&fs_info->submit_workers, 1);
btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
-
err = -EINVAL;
if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
printk("Btrfs: wanted %llu devices, but found %llu\n",
fs_info->data_alloc_profile = (u64)-1;
fs_info->metadata_alloc_profile = (u64)-1;
fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+ fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
+ "btrfs-cleaner");
+ if (IS_ERR(fs_info->cleaner_kthread))
+ goto fail_extent_root;
+
+ fs_info->transaction_kthread = kthread_run(transaction_kthread,
+ tree_root,
+ "btrfs-transaction");
+ if (IS_ERR(fs_info->transaction_kthread))
+ goto fail_trans_kthread;
+
return tree_root;
+fail_trans_kthread:
+ kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
free_extent_buffer(extent_root->node);
fail_tree_root:
fs_info->closing = 1;
smp_mb();
- btrfs_transaction_flush_work(root);
+ kthread_stop(root->fs_info->transaction_kthread);
+ kthread_stop(root->fs_info->cleaner_kthread);
+
btrfs_defrag_dirty_roots(root->fs_info);
+ btrfs_clean_old_snapshots(root);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
/* run commit again to drop the original snapshot */
write_ctree_super(NULL, root);
- btrfs_transaction_flush_work(root);
-
if (fs_info->delalloc_bytes) {
printk("btrfs: at unmount delalloc count %Lu\n",
fs_info->delalloc_bytes);
if (ret == -ENOSPC) {
printk("space info full %Lu\n", flags);
space_info->full = 1;
- goto out;
+ goto out_unlock;
}
BUG_ON(ret);
ret = btrfs_make_block_group(trans, extent_root, 0, flags,
BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
BUG_ON(ret);
-out:
+out_unlock:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
+out:
return 0;
}
free_extent_buffer(next);
mutex_unlock(&root->fs_info->alloc_mutex);
- reada_walk_down(root, cur, path->slots[*level]);
+ if (path->slots[*level] == 0)
+ reada_walk_down(root, cur, path->slots[*level]);
next = read_tree_block(root, bytenr, blocksize,
ptr_gen);
break;
if (wret < 0)
ret = wret;
- ret = -EAGAIN;
- break;
}
for (i = 0; i <= orig_level; i++) {
if (path->nodes[i]) {
goto fail_close;
sb->s_root = root_dentry;
- btrfs_transaction_queue_work(tree_root, HZ * 30);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
save_mount_options(sb, data);
goto error_free_subvol_name;
bdev = fs_devices->latest_bdev;
- btrfs_lock_volumes();
s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices);
- btrfs_unlock_volumes();
if (IS_ERR(s))
goto error_s;
static void btrfs_write_super_lockfs(struct super_block *sb)
{
struct btrfs_root *root = btrfs_sb(sb);
- btrfs_transaction_flush_work(root);
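+ /*
+ * The kthreads take these mutexes while they work, so holding both
+ * keeps them quiescent while the filesystem is frozen.
+ */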
+ mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ mutex_lock(&root->fs_info->cleaner_mutex);
}
static void btrfs_unlockfs(struct super_block *sb)
{
struct btrfs_root *root = btrfs_sb(sb);
- btrfs_transaction_queue_work(root, HZ * 30);
+ mutex_unlock(&root->fs_info->cleaner_mutex);
+ mutex_unlock(&root->fs_info->transaction_kthread_mutex);
}
static struct super_operations btrfs_super_ops = {
if (err)
return err;
- btrfs_init_transaction_sys();
err = btrfs_init_cachep();
if (err)
- goto free_transaction_sys;
+ goto free_sysfs;
err = extent_io_init();
if (err)
extent_io_exit();
free_cachep:
btrfs_destroy_cachep();
-free_transaction_sys:
- btrfs_exit_transaction_sys();
+free_sysfs:
btrfs_exit_sysfs();
return err;
}
static void __exit exit_btrfs_fs(void)
{
- btrfs_exit_transaction_sys();
btrfs_destroy_cachep();
extent_map_exit();
extent_io_exit();
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
-static struct workqueue_struct *trans_wq;
-
#define BTRFS_ROOT_TRANS_TAG 0
#define BTRFS_ROOT_DEFRAG_TAG 1
{
struct list_head dirty_roots;
INIT_LIST_HEAD(&dirty_roots);
-
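+ /* keep splicing and dropping until dead_roots comes up empty */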
+again:
mutex_lock(&root->fs_info->trans_mutex);
list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
mutex_unlock(&root->fs_info->trans_mutex);
if (!list_empty(&dirty_roots)) {
drop_dirty_roots(root, &dirty_roots);
+ goto again;
}
return 0;
}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-void btrfs_transaction_cleaner(void *p)
-#else
-void btrfs_transaction_cleaner(struct work_struct *work)
-#endif
-{
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- struct btrfs_fs_info *fs_info = p;
-#else
- struct btrfs_fs_info *fs_info = container_of(work,
- struct btrfs_fs_info,
- trans_work.work);
-
-#endif
- struct btrfs_root *root = fs_info->tree_root;
- struct btrfs_transaction *cur;
- struct btrfs_trans_handle *trans;
- unsigned long now;
- unsigned long delay = HZ * 30;
- int ret;
-
- smp_mb();
- if (root->fs_info->closing)
- goto out;
-
- mutex_lock(&root->fs_info->trans_mutex);
- cur = root->fs_info->running_transaction;
- if (!cur) {
- mutex_unlock(&root->fs_info->trans_mutex);
- goto out;
- }
- now = get_seconds();
- if (now < cur->start_time || now - cur->start_time < 30) {
- mutex_unlock(&root->fs_info->trans_mutex);
- delay = HZ * 5;
- goto out;
- }
- mutex_unlock(&root->fs_info->trans_mutex);
- btrfs_defrag_dirty_roots(root->fs_info);
- trans = btrfs_start_transaction(root, 1);
- ret = btrfs_commit_transaction(trans, root);
-out:
- btrfs_clean_old_snapshots(root);
- btrfs_transaction_queue_work(root, delay);
-}
-
-void btrfs_transaction_queue_work(struct btrfs_root *root, int delay)
-{
- if (!root->fs_info->closing)
- queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
-}
-
-void btrfs_transaction_flush_work(struct btrfs_root *root)
-{
- cancel_delayed_work(&root->fs_info->trans_work);
- flush_workqueue(trans_wq);
-}
-
-void __init btrfs_init_transaction_sys(void)
-{
- trans_wq = create_workqueue("btrfs-transaction");
-}
-
-void btrfs_exit_transaction_sys(void)
-{
- destroy_workqueue(trans_wq);
-}
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-void btrfs_transaction_cleaner(void *p);
-#else
-void btrfs_transaction_cleaner(struct work_struct *work);
-#endif
-
-void btrfs_transaction_flush_work(struct btrfs_root *root);
-void btrfs_transaction_queue_work(struct btrfs_root *root, int delay);
-void btrfs_init_transaction_sys(void);
-void btrfs_exit_transaction_sys(void);
int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest,
struct list_head *dead_list);
int btrfs_defrag_dirty_roots(struct btrfs_fs_info *info);
list_for_each(cur, head) {
device = list_entry(cur, struct btrfs_device, dev_list);
if (!device->in_fs_metadata) {
- if (device->bdev) {
- close_bdev_excl(device->bdev);
- fs_devices->open_devices--;
- }
+ struct block_device *bdev;
list_del(&device->dev_list);
list_del(&device->dev_alloc_list);
fs_devices->num_devices--;
+ if (device->bdev) {
+ bdev = device->bdev;
+ fs_devices->open_devices--;
+ mutex_unlock(&uuid_mutex);
+ close_bdev_excl(bdev);
+ mutex_lock(&uuid_mutex);
+ }
kfree(device->name);
kfree(device);
goto again;