// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "locking.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;

	/* Inode number */
	u64 ino;

	/*
	 * Transid where the defrag was added, we search for extents newer than
	 * this.
	 */
	u64 transid;

	/* Root objectid */
	u64 root;

	/*
	 * The extent size threshold for autodefrag.
	 *
	 * This value is different for compressed/non-compressed extents, thus
	 * needs to be passed in from the higher layer.
	 * (aka, inode_should_defrag())
	 */
	u32 extent_thresh;
};

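/*
 * Defrag records live in fs_info->defrag_inodes, an rbtree keyed by
 * (root objectid, inode number).  This helper provides that ordering.
 */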
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/*
 * Insert a record for an inode into the defrag tree.  The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag run,
			 * make sure to lower the transid of our existing
			 * record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			entry->extent_thresh = min(defrag->extent_thresh,
						   entry->extent_thresh);
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}

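/*
 * Whether auto defrag should run at all: it requires the autodefrag mount
 * option and is skipped once the filesystem starts closing.
 */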
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode, u32 extent_thresh)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;
	defrag->extent_thresh = extent_thresh;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and the inode is evicted from
		 * memory and then re-read, the new in-memory inode doesn't
		 * have the IN_DEFRAG flag set.  In that case we may find an
		 * existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *btrfs_pick_defrag_inode(
			struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

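/*
 * Free every queued defrag record (used by the unmount path).  The lock is
 * dropped and retaken via cond_resched_lock() so a large queue doesn't
 * monopolize the CPU.
 */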
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

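/*
 * How much of a file to defrag per pass: passed to btrfs_defrag_file() as
 * the maximum number of sectors to defrag before returning.
 */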
#define BTRFS_DEFRAG_BATCH	1024

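/*
 * Defrag one queued inode, a BTRFS_DEFRAG_BATCH chunk at a time, resuming
 * from where the previous chunk left off until the whole file has been
 * processed or an error occurs.  Consumes (frees) @defrag.
 */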
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int ret = 0;
	u64 cur = 0;

again:
	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
		goto cleanup;
	if (!__need_auto_defrag(fs_info))
		goto cleanup;

	/* Get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	if (cur >= i_size_read(inode)) {
		iput(inode);
		goto cleanup;
	}

	/* Do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = cur;
	range.extent_thresh = defrag->extent_thresh;

	sb_start_write(fs_info->sb);
	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	iput(inode);

	if (ret < 0)
		goto cleanup;

	cur = max(cur + fs_info->sectorsize, range.start);
	goto again;

cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* Find an inode to defrag. */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/*
 * Defrag all the leaves in a given btree.
 * Read all the leaves and try to get key order to
 * better reflect disk order.
 */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root)
{
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	int ret = 0;
	int wret;
	int level;
	int next_key_ret = 0;
	u64 last_ret = 0;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	level = btrfs_header_level(root->node);

	if (level == 0)
		goto out;

	if (root->defrag_progress.objectid == 0) {
		struct extent_buffer *root_node;
		u32 nritems;

		root_node = btrfs_lock_root_node(root);
		nritems = btrfs_header_nritems(root_node);
		root->defrag_max.objectid = 0;
		/* From above we know this is not a leaf. */
		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
				      nritems - 1);
		btrfs_tree_unlock(root_node);
		free_extent_buffer(root_node);
		memset(&key, 0, sizeof(key));
	} else {
		memcpy(&key, &root->defrag_progress, sizeof(key));
	}

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	btrfs_release_path(path);
	/*
	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
	 * leaves from path->nodes[1], so set lowest_level to 1 to avoid a
	 * later deadlock (attempting to write lock an already write locked
	 * leaf).
	 */
	path->lowest_level = 1;
	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	if (wret < 0) {
		ret = wret;
		goto out;
	}
	if (!path->nodes[1]) {
		ret = 0;
		goto out;
	}
	/*
	 * The node at level 1 must always be locked when our path has
	 * keep_locks set and lowest_level is 1, regardless of the value of
	 * path->slots[1].
	 */
	BUG_ON(path->locks[1] == 0);
	ret = btrfs_realloc_node(trans, root,
				 path->nodes[1], 0,
				 &last_ret,
				 &root->defrag_progress);
	if (ret) {
		WARN_ON(ret == -EAGAIN);
		goto out;
	}
	/*
	 * Now that we reallocated the node we can find the next key. Note that
	 * btrfs_find_next_key() can release our path and do another search
	 * without COWing, this is because even with path->keep_locks = 1,
	 * btrfs_search_slot() / ctree.c:unlock_up() does not keep a lock on a
	 * node when path->slots[node_level - 1] does not point to the last
	 * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
	 * we search for the next key after reallocating our node.
	 */
	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
					   BTRFS_OLDEST_GENERATION);
	if (next_key_ret == 0) {
		memcpy(&root->defrag_progress, &key, sizeof(key));
		ret = -EAGAIN;
	}
out:
	btrfs_free_path(path);
	if (ret == -EAGAIN) {
		if (root->defrag_max.objectid > root->defrag_progress.objectid)
			goto done;
		if (root->defrag_max.type > root->defrag_progress.type)
			goto done;
		if (root->defrag_max.offset > root->defrag_progress.offset)
			goto done;
		ret = 0;
	}
done:
	if (ret != -EAGAIN)
		memset(&root->defrag_progress, 0,
		       sizeof(root->defrag_progress));

	return ret;
}

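/* Destroy the defrag record slab cache (module unload path). */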
void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

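/* Create the slab cache for inode_defrag records (module init path). */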
int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD, NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}