// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "locking.h"
#include "accessors.h"
#include "messages.h"
#include "delalloc-space.h"
#include "subpage.h"
#include "defrag.h"
#include "file-item.h"
#include "super.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* Inode number */
	u64 ino;
	/*
	 * Transid where the defrag was added, we search for extents newer than
	 * this.
	 */
	u64 transid;

	/* Root objectid */
	u64 root;

	/*
	 * The extent size threshold for autodefrag.
	 *
	 * This value is different for compressed/non-compressed extents, thus
	 * needs to be passed from higher layer.
	 * (aka, inode_should_defrag())
	 */
	u32 extent_thresh;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
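
/*
 * Defrag records live in fs_info->defrag_inodes, an rbtree ordered first by
 * root objectid and then by inode number (see the comparison above), so a
 * given inode has at most one record per root.
 */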
/*
 * Insert a record for an inode into the defrag tree.  The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag run,
			 * make sure to lower the transid of our existing
			 * record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			entry->extent_thresh = min(defrag->extent_thresh,
						   entry->extent_thresh);
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
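
/*
 * When an existing record is found this helper only updates it and reports
 * failure, so the caller in btrfs_add_inode_defrag() below frees the record
 * it allocated instead of leaking it.
 */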
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}
/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode, u32 extent_thresh)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;
	defrag->extent_thresh = extent_thresh;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from memory,
		 * and then re-read this inode, the new in-memory inode doesn't
		 * have the IN_DEFRAG flag set. In that case, we may find an
		 * existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}
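
/*
 * Expected caller (outside this file): the buffered write path, via
 * inode_should_defrag(), which is also where the extent_thresh hint referenced
 * by struct inode_defrag comes from.
 */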
/*
 * Pick the defraggable inode that we want, if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *btrfs_pick_defrag_inode(
			struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
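
/*
 * Batch size for one autodefrag pass: at most this many sectors are handed to
 * btrfs_defrag_file() per pass over an inode, so a single huge file cannot
 * monopolize the background defrag run.
 */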
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int ret = 0;
	u64 cur = 0;

again:
	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
		goto cleanup;
	if (!__need_auto_defrag(fs_info))
		goto cleanup;

	/* Get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	if (cur >= i_size_read(inode)) {
		iput(inode);
		goto cleanup;
	}

	/* Do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = cur;
	range.extent_thresh = defrag->extent_thresh;

	sb_start_write(fs_info->sb);
	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	iput(inode);

	if (ret < 0)
		goto cleanup;

	cur = max(cur + fs_info->sectorsize, range.start);
	goto again;

cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}
/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
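
/*
 * This is expected to be driven from the cleaner thread. Unmount waits on
 * transaction_wait until defrag_running drops back to zero, which is why the
 * queue is woken above even though no transaction is involved here.
 */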
/*
 * Defrag all the leaves in a given btree.
 * Read all the leaves and try to get key order to
 * better reflect disk order
 */

int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root)
{
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	int ret = 0;
	int wret;
	int level;
	int next_key_ret = 0;
	u64 last_ret = 0;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	level = btrfs_header_level(root->node);

	if (level == 0)
		goto out;

	if (root->defrag_progress.objectid == 0) {
		struct extent_buffer *root_node;
		u32 nritems;

		root_node = btrfs_lock_root_node(root);
		nritems = btrfs_header_nritems(root_node);
		root->defrag_max.objectid = 0;
		/* from above we know this is not a leaf */
		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
				      nritems - 1);
		btrfs_tree_unlock(root_node);
		free_extent_buffer(root_node);
		memset(&key, 0, sizeof(key));
	} else {
		memcpy(&key, &root->defrag_progress, sizeof(key));
	}

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	btrfs_release_path(path);
	/*
	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
	 * leaves from path->nodes[1], so set lowest_level to 1 to avoid later
	 * a deadlock (attempting to write lock an already write locked leaf).
	 */
	path->lowest_level = 1;
	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	if (wret < 0) {
		ret = wret;
		goto out;
	}
	if (!path->nodes[1]) {
		ret = 0;
		goto out;
	}
	/*
	 * The node at level 1 must always be locked when our path has
	 * keep_locks set and lowest_level is 1, regardless of the value of
	 * path->slots[1].
	 */
	BUG_ON(path->locks[1] == 0);
	ret = btrfs_realloc_node(trans, root,
				 path->nodes[1], 0,
				 &last_ret,
				 &root->defrag_progress);
	if (ret) {
		WARN_ON(ret == -EAGAIN);
		goto out;
	}
	/*
	 * Now that we reallocated the node we can find the next key. Note that
	 * btrfs_find_next_key() can release our path and do another search
	 * without COWing, this is because even with path->keep_locks = 1,
	 * btrfs_search_slot() / ctree.c:unlock_up() does not keep a lock on a
	 * node when path->slots[node_level - 1] does not point to the last
	 * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
	 * we search for the next key after reallocating our node.
	 */
	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
					   BTRFS_OLDEST_GENERATION);
	if (next_key_ret == 0) {
		memcpy(&root->defrag_progress, &key, sizeof(key));
		ret = -EAGAIN;
	}
out:
	btrfs_free_path(path);
	if (ret == -EAGAIN) {
		if (root->defrag_max.objectid > root->defrag_progress.objectid)
			goto done;
		if (root->defrag_max.type > root->defrag_progress.type)
			goto done;
		if (root->defrag_max.offset > root->defrag_progress.offset)
			goto done;
		ret = 0;
	}
done:
	if (ret != -EAGAIN)
		memset(&root->defrag_progress, 0,
		       sizeof(root->defrag_progress));

	return ret;
}
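
/*
 * The -EAGAIN return above means "made progress, call me again"; the caller
 * (btrfs_defrag_root(), outside this file) is expected to loop on it until
 * the whole tree has been walked and defrag_progress is finally reset.
 */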
/*
 * Defrag specific helper to get an extent map.
 *
 * Differences between this and btrfs_get_extent() are:
 *
 * - No extent_map will be added to inode->extent_tree
 *   To reduce memory usage in the long run.
 *
 * - Extra optimization to skip file extents older than @newer_than
 *   By using btrfs_search_forward() we can skip entire file ranges that
 *   have extents created in past transactions, because btrfs_search_forward()
 *   will not visit leaves and nodes with a generation smaller than given
 *   minimal generation threshold (@newer_than).
 *
 * Return valid em if we find a file extent matching the requirement.
 * Return NULL if we can not find a file extent matching the requirement.
 *
 * Return ERR_PTR() for error.
 */
static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
					    u64 start, u64 newer_than)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path path = { 0 };
	struct extent_map *em;
	struct btrfs_key key;
	u64 ino = btrfs_ino(inode);
	int ret;

	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto err;
	}

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

	if (newer_than) {
		ret = btrfs_search_forward(root, &key, &path, newer_than);
		if (ret < 0)
			goto err;
		/* Can't find anything newer */
		if (ret > 0)
			goto not_found;
	} else {
		ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
		if (ret < 0)
			goto err;
	}
	if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
		/*
		 * If btrfs_search_slot() makes the path point beyond nritems,
		 * we should not have an empty leaf, as this inode must at
		 * least have its INODE_ITEM.
		 */
		ASSERT(btrfs_header_nritems(path.nodes[0]));
		path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
	}
	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
	/* Perfect match, no need to go one slot back */
	if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
	    key.offset == start)
		goto iterate;

	/* We didn't find a perfect match, need to go one slot back */
	if (path.slots[0] > 0) {
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
			path.slots[0]--;
	}

iterate:
	/* Iterate through the path to find a file extent covering @start */
	while (true) {
		u64 extent_end;

		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
			goto next;

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

		/*
		 * We may go one slot back to INODE_REF/XATTR item, then
		 * need to go forward until we reach an EXTENT_DATA.
		 * But we should still have the correct ino as key.objectid.
		 */
		if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;

		/* It's beyond our target range, definitely no extent found */
		if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
			goto not_found;

		/*
		 *	|	|<- File extent ->|
		 *	\- start
		 *
		 * This means there is a hole between start and key.offset.
		 */
		if (key.offset > start) {
			em->start = start;
			em->orig_start = start;
			em->block_start = EXTENT_MAP_HOLE;
			em->len = key.offset - start;
			break;
		}

		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
				    struct btrfs_file_extent_item);
		extent_end = btrfs_file_extent_end(&path);

		/*
		 *	|<- file extent ->|	|
		 *			  \- start
		 *
		 * We haven't reached start, search next slot.
		 */
		if (extent_end <= start)
			goto next;

		/* Now this extent covers @start, convert it to em */
		btrfs_extent_item_to_extent_map(inode, &path, fi, em);
		break;
next:
		ret = btrfs_next_item(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0)
			goto not_found;
	}
	btrfs_release_path(&path);
	return em;

not_found:
	btrfs_release_path(&path);
	free_extent_map(em);
	return NULL;

err:
	btrfs_release_path(&path);
	free_extent_map(em);
	return ERR_PTR(ret);
}
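
/*
 * Worked example for the hole branch above: with start == 64K and the first
 * EXTENT_DATA key at file offset 128K, the returned em describes the 64K hole
 * ([64K, 128K), block_start == EXTENT_MAP_HOLE) so the caller can skip it
 * cheaply instead of trying to defrag it.
 */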
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       u64 newer_than, bool locked)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;

	/*
	 * Hopefully we have this extent in the tree already, try without the
	 * full extent lock.
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, sectorsize);
	read_unlock(&em_tree->lock);

	/*
	 * We can get a merged extent, in that case, we need to re-search
	 * tree to get the original em for defrag.
	 *
	 * If @newer_than is 0 or em::generation < newer_than, we can trust
	 * this em, as either we don't care about the generation, or the
	 * merged extent map will be rejected anyway.
	 */
	if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
	    newer_than && em->generation >= newer_than) {
		free_extent_map(em);
		em = NULL;
	}

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + sectorsize - 1;

		/* Get the big lock and read metadata off disk. */
		if (!locked)
			lock_extent(io_tree, start, end, &cached);
		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
		if (!locked)
			unlock_extent(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
				   const struct extent_map *em)
{
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
		return BTRFS_MAX_COMPRESSED;
	return fs_info->max_extent_size;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     u32 extent_thresh, u64 newer_than, bool locked)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *next;
	bool ret = false;

	/* This is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	/*
	 * Here we need to pass @newer_than when checking the next extent, or
	 * we will hit a case where we mark the current extent for defrag, but
	 * the next one will not be a target.
	 * This will just cause extra IO without really reducing the fragments.
	 */
	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
	/* No more em or hole */
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		goto out;
	if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
		goto out;
	/*
	 * If the next extent is at its max capacity, defragging current extent
	 * makes no sense, as the total number of extents won't change.
	 */
	if (next->len >= get_extent_max_capacity(fs_info, em))
		goto out;
	/* Skip older extent */
	if (next->generation < newer_than)
		goto out;
	/* Also check extent size */
	if (next->len >= extent_thresh)
		goto out;

	ret = true;
out:
	free_extent_map(next);
	return ret;
}
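
/*
 * In short: the current extent is only worth defragging if the extent after
 * it is itself small, new enough and not preallocated, i.e. the two could
 * plausibly be merged into one larger extent by a later writeback.
 */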
/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct page *page;
	int ret;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/*
	 * Since we can defragment files opened read-only, we can encounter
	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
	 * can't do I/O using huge pages yet, so return an error for now.
	 * Filesystem transparent huge pages are typically only used for
	 * executables that explicitly enable them, so this isn't very
	 * common.
	 */
	if (PageCompound(page)) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(-ETXTBSY);
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		if (!ordered)
			break;

		unlock_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		lock_page(page);
		/*
		 * We unlocked the page above, so we need to check if it was
		 * released or not.
		 */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more. Read the page to
	 * make it uptodate.
	 */
	if (!PageUptodate(page)) {
		btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
struct defrag_target_range {
	struct list_head list;
	u64 start;
	u64 len;
};
/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   if the range has already held extent lock
 * @target_list:   list of target file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list,
				  u64 *last_scanned_ret)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool last_is_target = false;
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;
		u64 range_len;

		last_is_target = false;
		em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked);
		if (!em)
			break;

		/*
		 * If the file extent is an inlined one, we may still want to
		 * defrag it (fallthrough) if it will cause a regular extent.
		 * This is for users who want to convert inline extents to
		 * regular ones through max_inline= mount option.
		 */
		if (em->block_start == EXTENT_MAP_INLINE &&
		    em->len <= inode->root->fs_info->max_inline)
			goto next;

		/* Skip hole/delalloc/preallocated extents */
		if (em->block_start == EXTENT_MAP_HOLE ||
		    em->block_start == EXTENT_MAP_DELALLOC ||
		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			goto next;

		/* Skip older extent */
		if (em->generation < newer_than)
			goto next;

		/* This em is under writeback, no need to defrag */
		if (em->generation == (u64)-1)
			goto next;

		/*
		 * Our start offset might be in the middle of an existing extent
		 * map, so take that into account.
		 */
		range_len = em->len - (cur - em->start);
		/*
		 * If this range of the extent map is already flagged for delalloc,
		 * skip it, because:
		 *
		 * 1) We could deadlock later, when trying to reserve space for
		 *    delalloc, because in case we can't immediately reserve space
		 *    the flusher can start delalloc and wait for the respective
		 *    ordered extents to complete. The deadlock would happen
		 *    because we do the space reservation while holding the range
		 *    locked, and starting writeback, or finishing an ordered
		 *    extent, requires locking the range;
		 *
		 * 2) If there's delalloc there, it means there's dirty pages for
		 *    which writeback has not started yet (we clean the delalloc
		 *    flag when starting writeback and after creating an ordered
		 *    extent). If we mark pages in an adjacent range for defrag,
		 *    then we will have a larger contiguous range for delalloc,
		 *    very likely resulting in a larger extent after writeback is
		 *    triggered (except in a case of free space fragmentation).
		 */
		if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
				   EXTENT_DELALLOC, 0, NULL))
			goto next;

		/*
		 * For do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */
		if (do_compress)
			goto add;

		/* Skip too large extent */
		if (range_len >= extent_thresh)
			goto next;

		/*
		 * Skip extents already at their max capacity, this is mostly for
		 * compressed extents, which max cap is only 128K.
		 */
		if (em->len >= get_extent_max_capacity(fs_info, em))
			goto next;

		/*
		 * Normally there are no more extents after an inline one, thus
		 * @next_mergeable will normally be false and not defragged.
		 * So if an inline extent passed all above checks, just add it
		 * for defrag, and be converted to regular extents.
		 */
		if (em->block_start == EXTENT_MAP_INLINE)
			goto add;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
						extent_thresh, newer_than, locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		last_is_target = true;
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		/* Allocate new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		new->start = cur;
		new->len = range_len;
		list_add_tail(&new->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	if (!ret && last_scanned_ret) {
		/*
		 * If the last extent is not a target, the caller can skip to
		 * the end of that extent.
		 * Otherwise, we can only go to the end of the specified range.
		 */
		if (!last_is_target)
			*last_scanned_ret = max(cur, *last_scanned_ret);
		else
			*last_scanned_ret = max(start + len, *last_scanned_ret);
	}
	return ret;
}
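
/*
 * Illustrative example (numbers made up): with extent_thresh == 256K and three
 * adjacent 4K/8K/16K extents that are all newer than @newer_than, the loop
 * above merges them into a single 28K defrag_target_range, while a following
 * 1M extent is skipped because it is already above the threshold.
 */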
#define CLUSTER_SIZE	(SZ_256K)
static_assert(PAGE_ALIGNED(CLUSTER_SIZE));

/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct page **pages, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = page_index(pages[0]);
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, cached_state);
	set_extent_bit(&inode->io_tree, start, start + len - 1,
		       EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		ClearPageChecked(pages[i]);
		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
	}
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}
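
/*
 * Note that nothing is written here: reserving delalloc space, setting
 * EXTENT_DEFRAG and dirtying the pages is all that is needed, the regular
 * writeback path will later write the whole range out contiguously.
 */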
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress,
			    u64 *last_scanned_ret)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct page **pages;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;
	int ret = 0;
	int i;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = defrag_prepare_one_page(inode, start_index + i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto free_pages;
		}
	}
	for (i = 0; i < nr_pages; i++)
		wait_on_page_writeback(pages[i]);

	/* Lock the pages range */
	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
		    &cached_state);
	/*
	 * Now we have a consistent view about the extent map, re-check
	 * which range really needs to be defragged.
	 *
	 * And this time we have extent locked already, pass @locked = true
	 * so that we won't relock the extent range and cause deadlock.
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list, last_scanned_ret);
	if (ret < 0)
		goto unlock_extent;

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
					       &cached_state);
		if (ret < 0)
			break;
	}

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
unlock_extent:
	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
		      &cached_state);
free_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}
	kfree(pages);
	return ret;
}
static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors,
			      u64 *last_scanned_ret)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list, NULL);
	if (ret < 0)
		goto out;

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached or beyond the limit */
		if (max_sectors && *sectors_defragged >= max_sectors) {
			ret = 1;
			break;
		}

		if (max_sectors)
			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		/*
		 * If defrag_one_range() has updated last_scanned_ret,
		 * our range may already be invalid (e.g. hole punched).
		 * Skip if our range is before last_scanned_ret, as there is
		 * no need to defrag the range anymore.
		 */
		if (entry->start + range_len <= *last_scanned_ret)
			continue;

		if (ra)
			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);
		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress,
				       last_scanned_ret);
		if (ret < 0)
			break;
		*sectors_defragged += range_len >>
				      inode->root->fs_info->sectorsize_bits;
	}
out:
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (ret >= 0)
		*last_scanned_ret = max(*last_scanned_ret, start + len);
	return ret;
}
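
/*
 * Clusters are CLUSTER_SIZE (256K) wide: btrfs_defrag_file() below walks the
 * requested range one cluster at a time, and within each cluster the target
 * list collected above is defragged range by range.
 */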
/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NULL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 *
 * Return <0 for error.
 * Return >=0 for the number of sectors defragged, and range->start will be updated
 * to indicate the file offset where next defrag should be started at.
 * (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
 *  defragging all the range).
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long sectors_defragged = 0;
	u64 isize = i_size_read(inode);
	u64 cur;
	u64 last_byte;
	bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
	bool ra_allocated = false;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int ret = 0;
	u32 extent_thresh = range->extent_thresh;
	pgoff_t start_index;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (do_compress) {
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len);
	} else {
		/* Defrag until file end */
		last_byte = isize;
	}

	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;

	/*
	 * If we were not given a ra, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
	if (!ra) {
		ra_allocated = true;
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	}

	/*
	 * Make writeback start from the beginning of the range, so that the
	 * defrag range can be written sequentially.
	 */
	start_index = cur >> PAGE_SHIFT;
	if (start_index < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = start_index;

	while (cur < last_byte) {
		const unsigned long prev_sectors_defragged = sectors_defragged;
		u64 last_scanned = cur;
		u64 cluster_end;

		if (btrfs_defrag_cancelled(fs_info)) {
			ret = -EAGAIN;
			break;
		}

		/* We want the cluster end at page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);

		btrfs_inode_lock(BTRFS_I(inode), 0);
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
			btrfs_inode_unlock(BTRFS_I(inode), 0);
			break;
		}
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
			btrfs_inode_unlock(BTRFS_I(inode), 0);
			break;
		}
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				cluster_end + 1 - cur, extent_thresh,
				newer_than, do_compress, &sectors_defragged,
				max_to_defrag, &last_scanned);

		if (sectors_defragged > prev_sectors_defragged)
			balance_dirty_pages_ratelimited(inode->i_mapping);

		btrfs_inode_unlock(BTRFS_I(inode), 0);
		if (ret < 0)
			break;
		cur = max(cluster_end + 1, last_scanned);
		if (ret > 0) {
			ret = 0;
			break;
		}
		cond_resched();
	}

	if (ra_allocated)
		kfree(ra);
	/*
	 * Update range.start for autodefrag, this will indicate where to start
	 * in the next run.
	 */
	range->start = cur;
	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for compression case they
		 * need to be written back immediately.
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
			filemap_flush(inode->i_mapping);
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;
	}
	if (do_compress) {
		btrfs_inode_lock(BTRFS_I(inode), 0);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		btrfs_inode_unlock(BTRFS_I(inode), 0);
	}
	return ret;
}
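
/*
 * Illustrative sketch (not code from this file) of how an ioctl-style caller
 * might drive btrfs_defrag_file(); the field names come from
 * struct btrfs_ioctl_defrag_range_args as used above:
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *	int ret;
 *
 *	range.len = (u64)-1;				// whole file
 *	range.extent_thresh = SZ_256K;			// default threshold
 *	range.flags = BTRFS_DEFRAG_RANGE_START_IO;	// flush when done
 *	ret = btrfs_defrag_file(inode, ra, &range, 0, 0);
 *
 * Autodefrag instead passes a non-zero newer_than and BTRFS_DEFRAG_BATCH as
 * max_to_defrag (see __btrfs_run_defrag_inode() above).
 */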
void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}