1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2007 Oracle. All rights reserved.
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
22 #include "transaction.h"
23 #include "btrfs_inode.h"
24 #include "print-tree.h"
29 #include "compression.h"
30 #include "delalloc-space.h"
34 static struct kmem_cache *btrfs_inode_defrag_cachep;
36 * when auto defrag is enabled we
37 * queue up these defrag structs to remember which
38 * inodes need defragging passes
41 struct rb_node rb_node;
45 * transid where the defrag was added, we search for
46 * extents newer than this
53 /* last offset we were able to defrag */
56 /* if we've wrapped around back to zero once already */
60 static int __compare_inode_defrag(struct inode_defrag *defrag1,
61 struct inode_defrag *defrag2)
63 if (defrag1->root > defrag2->root)
65 else if (defrag1->root < defrag2->root)
67 else if (defrag1->ino > defrag2->ino)
69 else if (defrag1->ino < defrag2->ino)
75 /* Insert a record for an inode into the defrag tree. The lock
76  * must be held already.
78  * If you're inserting a record for an older transid than an
79  * existing record, the transid already in the tree is lowered.
81  * If an existing record is found, the defrag item you pass in is freed.
84 static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
85 struct inode_defrag *defrag)
87 struct btrfs_fs_info *fs_info = inode->root->fs_info;
88 struct inode_defrag *entry;
90 struct rb_node *parent = NULL;
93 p = &fs_info->defrag_inodes.rb_node;
96 entry = rb_entry(parent, struct inode_defrag, rb_node);
98 ret = __compare_inode_defrag(defrag, entry);
100 p = &parent->rb_left;
102 p = &parent->rb_right;
104 /* if we're reinserting an entry for
105 * an old defrag run, make sure to
106 * lower the transid of our existing record
108 if (defrag->transid < entry->transid)
109 entry->transid = defrag->transid;
110 if (defrag->last_offset > entry->last_offset)
111 entry->last_offset = defrag->last_offset;
115 set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
116 rb_link_node(&defrag->rb_node, parent, p);
117 rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
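/*
 * The merge above means that, for example (illustrative numbers): if the tree
 * already holds a record with transid 100 and last_offset 4096 for this inode
 * and we try to add one with transid 90 and last_offset 8192, the existing
 * record ends up with transid 90 and last_offset 8192, and the new record is
 * not inserted (the caller frees it).
 */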
121 static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
123 if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
126 if (btrfs_fs_closing(fs_info))
133 * insert a defrag record for this inode if auto defrag is enabled
136 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
137 struct btrfs_inode *inode)
139 struct btrfs_root *root = inode->root;
140 struct btrfs_fs_info *fs_info = root->fs_info;
141 struct inode_defrag *defrag;
145 if (!__need_auto_defrag(fs_info))
148 if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
152 transid = trans->transid;
154 transid = inode->root->last_trans;
156 defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
160 defrag->ino = btrfs_ino(inode);
161 defrag->transid = transid;
162 defrag->root = root->root_key.objectid;
164 spin_lock(&fs_info->defrag_inodes_lock);
165 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
167 * If we set the IN_DEFRAG flag and evict the inode from memory,
168 * and then re-read this inode, the new in-memory inode doesn't have
169 * the IN_DEFRAG flag set. In that case we may find an existing defrag record in the tree.
171 ret = __btrfs_add_inode_defrag(inode, defrag);
173 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
175 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
177 spin_unlock(&fs_info->defrag_inodes_lock);
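/*
 * Illustrative sketch (not built, hypothetical caller): a write path that has
 * decided an inode is getting fragmented could queue it for the auto defrag
 * worker like this. Only btrfs_add_inode_defrag() above is real; the wrapper
 * name is made up.
 */
#if 0
static void example_queue_for_autodefrag(struct btrfs_trans_handle *trans,
					 struct btrfs_inode *inode)
{
	/* A duplicate queue attempt is cheap: the IN_DEFRAG bit short-circuits it. */
	btrfs_add_inode_defrag(trans, inode);
}
#endif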
182 * Requeue the defrag object. If there is a defrag object that points to
183 * the same inode in the tree, we will merge them together (by
184 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
186 static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
187 struct inode_defrag *defrag)
189 struct btrfs_fs_info *fs_info = inode->root->fs_info;
192 if (!__need_auto_defrag(fs_info))
196 * Here we don't check the IN_DEFRAG flag, because we need to merge them together.
199 spin_lock(&fs_info->defrag_inodes_lock);
200 ret = __btrfs_add_inode_defrag(inode, defrag);
201 spin_unlock(&fs_info->defrag_inodes_lock);
206 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
210 * Pick the defraggable inode that we want; if it doesn't exist, we will get the next one.
213 static struct inode_defrag *
214 btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
216 struct inode_defrag *entry = NULL;
217 struct inode_defrag tmp;
219 struct rb_node *parent = NULL;
225 spin_lock(&fs_info->defrag_inodes_lock);
226 p = fs_info->defrag_inodes.rb_node;
229 entry = rb_entry(parent, struct inode_defrag, rb_node);
231 ret = __compare_inode_defrag(&tmp, entry);
235 p = parent->rb_right;
240 if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
241 parent = rb_next(parent);
243 entry = rb_entry(parent, struct inode_defrag, rb_node);
249 rb_erase(parent, &fs_info->defrag_inodes);
250 spin_unlock(&fs_info->defrag_inodes_lock);
254 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
256 struct inode_defrag *defrag;
257 struct rb_node *node;
259 spin_lock(&fs_info->defrag_inodes_lock);
260 node = rb_first(&fs_info->defrag_inodes);
262 rb_erase(node, &fs_info->defrag_inodes);
263 defrag = rb_entry(node, struct inode_defrag, rb_node);
264 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
266 cond_resched_lock(&fs_info->defrag_inodes_lock);
268 node = rb_first(&fs_info->defrag_inodes);
270 spin_unlock(&fs_info->defrag_inodes_lock);
273 #define BTRFS_DEFRAG_BATCH 1024
275 static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
276 struct inode_defrag *defrag)
278 struct btrfs_root *inode_root;
280 struct btrfs_ioctl_defrag_range_args range;
285 inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
286 if (IS_ERR(inode_root)) {
287 ret = PTR_ERR(inode_root);
291 inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
292 btrfs_put_root(inode_root);
294 ret = PTR_ERR(inode);
298 /* do a chunk of defrag */
299 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
300 memset(&range, 0, sizeof(range));
302 range.start = defrag->last_offset;
304 sb_start_write(fs_info->sb);
305 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
307 sb_end_write(fs_info->sb);
309 * if we filled the whole defrag batch, there
310 * must be more work to do. Queue this defrag again.
313 if (num_defrag == BTRFS_DEFRAG_BATCH) {
314 defrag->last_offset = range.start;
315 btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
316 } else if (defrag->last_offset && !defrag->cycled) {
318 * we didn't fill our defrag batch, but
319 * we didn't start at zero. Make sure we loop
320 * around to the start of the file.
322 defrag->last_offset = 0;
324 btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
326 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
332 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
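/*
 * Minimal sketch (userspace-style simplification, not built) of the requeue
 * decision above: a full batch means there is more to do at the current
 * offset, otherwise wrap around to offset zero once before dropping the
 * record for good.
 */
#if 0
static bool example_should_requeue(int num_defrag, u64 *last_offset,
				   bool *cycled)
{
	if (num_defrag == BTRFS_DEFRAG_BATCH)
		return true;		/* continue from where we stopped */
	if (*last_offset && !*cycled) {
		*last_offset = 0;	/* loop back to the start of the file */
		*cycled = true;
		return true;
	}
	return false;			/* this inode is done, free the record */
}
#endif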
337 * run through the list of inodes in the FS that need defragging
340 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
342 struct inode_defrag *defrag;
344 u64 root_objectid = 0;
346 atomic_inc(&fs_info->defrag_running);
348 /* Pause the auto defragger. */
349 if (test_bit(BTRFS_FS_STATE_REMOUNTING,
353 if (!__need_auto_defrag(fs_info))
356 /* find an inode to defrag */
357 defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
360 if (root_objectid || first_ino) {
369 first_ino = defrag->ino + 1;
370 root_objectid = defrag->root;
372 __btrfs_run_defrag_inode(fs_info, defrag);
374 atomic_dec(&fs_info->defrag_running);
377 * during unmount, we use the transaction_wait queue to
378 * wait for the defragger to stop
380 wake_up(&fs_info->transaction_wait);
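/*
 * In short: the runner above repeatedly picks the record with the smallest
 * (root, ino) at or after its cursor, defrags one batch for that inode, and
 * then advances the cursor to (defrag->root, defrag->ino + 1) so every queued
 * inode is visited once per pass.
 */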
384 /* simple helper to fault in pages and copy. This should go away
385 * and be replaced with calls into generic code.
387 static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
388 struct page **prepared_pages,
392 size_t total_copied = 0;
394 int offset = offset_in_page(pos);
396 while (write_bytes > 0) {
397 size_t count = min_t(size_t,
398 PAGE_SIZE - offset, write_bytes);
399 struct page *page = prepared_pages[pg];
401 * Copy data from userspace to the current page
403 copied = copy_page_from_iter_atomic(page, offset, count, i);
405 /* Flush processor's dcache for this page */
406 flush_dcache_page(page);
409 * if we get a partial write, we can end up with
410 * partially up to date pages. These add
411 * a lot of complexity, so make sure they don't
412 * happen by forcing this copy to be retried.
414 * The rest of the btrfs_file_write code will fall
415 * back to page at a time copies after we return 0.
417 if (unlikely(copied < count)) {
418 if (!PageUptodate(page)) {
419 iov_iter_revert(i, copied);
426 write_bytes -= copied;
427 total_copied += copied;
429 if (offset == PAGE_SIZE) {
438 * unlocks pages after btrfs_file_write is done with them
440 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
443 for (i = 0; i < num_pages; i++) {
444 /* page checked is some magic around finding pages that
445 * have been modified without going through btrfs_set_page_dirty;
446 * clear it here. There should be no need to mark the pages
447 * accessed, as prepare_pages should have already marked them
448 * accessed via find_or_create_page().
450 ClearPageChecked(pages[i]);
451 unlock_page(pages[i]);
457 * After btrfs_copy_from_user(), update the following things for delalloc:
458 * - Mark newly dirtied pages as DELALLOC in the io tree.
459 * Used to advise which range is to be written back.
460 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
461 * - Update inode size for past EOF write
463 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
464 size_t num_pages, loff_t pos, size_t write_bytes,
465 struct extent_state **cached, bool noreserve)
467 struct btrfs_fs_info *fs_info = inode->root->fs_info;
472 u64 end_of_last_block;
473 u64 end_pos = pos + write_bytes;
474 loff_t isize = i_size_read(&inode->vfs_inode);
475 unsigned int extra_bits = 0;
477 if (write_bytes == 0)
481 extra_bits |= EXTENT_NORESERVE;
483 start_pos = round_down(pos, fs_info->sectorsize);
484 num_bytes = round_up(write_bytes + pos - start_pos,
485 fs_info->sectorsize);
486 ASSERT(num_bytes <= U32_MAX);
488 end_of_last_block = start_pos + num_bytes - 1;
491 * The pages may have already been dirty, clear out old accounting so
492 * we can set things up properly
494 clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
495 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
498 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
503 for (i = 0; i < num_pages; i++) {
504 struct page *p = pages[i];
506 btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
508 btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
512 * we've only changed i_size in ram, and we haven't updated
513 * the disk i_size. There is no need to log the inode at this time.
517 i_size_write(&inode->vfs_inode, end_pos);
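/*
 * Worked example for the rounding done in btrfs_dirty_pages() above
 * (assuming a 4K sectorsize): pos = 6000, write_bytes = 1000
 *   start_pos         = round_down(6000, 4096)             = 4096
 *   num_bytes         = round_up(1000 + 6000 - 4096, 4096) = 4096
 *   end_of_last_block = 4096 + 4096 - 1                    = 8191
 * so delalloc is set on the single sector [4096, 8191] even though the
 * copied bytes only cover [6000, 6999].
 */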
522 * this drops all the extents in the cache that intersect the range
523 * [start, end]. Existing extents are split as required.
525 void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
528 struct extent_map *em;
529 struct extent_map *split = NULL;
530 struct extent_map *split2 = NULL;
531 struct extent_map_tree *em_tree = &inode->extent_tree;
532 u64 len = end - start + 1;
540 WARN_ON(end < start);
541 if (end == (u64)-1) {
550 split = alloc_extent_map();
552 split2 = alloc_extent_map();
553 if (!split || !split2)
556 write_lock(&em_tree->lock);
557 em = lookup_extent_mapping(em_tree, start, len);
559 write_unlock(&em_tree->lock);
563 gen = em->generation;
564 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
565 if (testend && em->start + em->len >= start + len) {
567 write_unlock(&em_tree->lock);
570 start = em->start + em->len;
572 len = start + len - (em->start + em->len);
574 write_unlock(&em_tree->lock);
577 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
578 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
579 clear_bit(EXTENT_FLAG_LOGGING, &flags);
580 modified = !list_empty(&em->list);
584 if (em->start < start) {
585 split->start = em->start;
586 split->len = start - em->start;
588 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
589 split->orig_start = em->orig_start;
590 split->block_start = em->block_start;
593 split->block_len = em->block_len;
595 split->block_len = split->len;
596 split->orig_block_len = max(split->block_len,
598 split->ram_bytes = em->ram_bytes;
600 split->orig_start = split->start;
601 split->block_len = 0;
602 split->block_start = em->block_start;
603 split->orig_block_len = 0;
604 split->ram_bytes = split->len;
607 split->generation = gen;
608 split->flags = flags;
609 split->compress_type = em->compress_type;
610 replace_extent_mapping(em_tree, em, split, modified);
611 free_extent_map(split);
615 if (testend && em->start + em->len > start + len) {
616 u64 diff = start + len - em->start;
618 split->start = start + len;
619 split->len = em->start + em->len - (start + len);
620 split->flags = flags;
621 split->compress_type = em->compress_type;
622 split->generation = gen;
624 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
625 split->orig_block_len = max(em->block_len,
628 split->ram_bytes = em->ram_bytes;
630 split->block_len = em->block_len;
631 split->block_start = em->block_start;
632 split->orig_start = em->orig_start;
634 split->block_len = split->len;
635 split->block_start = em->block_start + diff;
637 split->orig_start = em->orig_start;
640 split->ram_bytes = split->len;
641 split->orig_start = split->start;
642 split->block_len = 0;
643 split->block_start = em->block_start;
644 split->orig_block_len = 0;
647 if (extent_map_in_tree(em)) {
648 replace_extent_mapping(em_tree, em, split,
651 ret = add_extent_mapping(em_tree, split,
653 ASSERT(ret == 0); /* Logic error */
655 free_extent_map(split);
659 if (extent_map_in_tree(em))
660 remove_extent_mapping(em_tree, em);
661 write_unlock(&em_tree->lock);
665 /* once for the tree */
669 free_extent_map(split);
671 free_extent_map(split2);
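/*
 * Worked example (illustrative numbers) for the two splits done in
 * btrfs_drop_extent_cache() above: dropping the range [16K, 24K) out of an
 * extent map covering [0, 64K):
 *   front split: start = 0,   len = 16K - 0        = 16K
 *   back split:  start = 24K, len = 0 + 64K - 24K  = 40K
 *   diff (offset of the back split into the original mapping) = 24K
 * The original [0, 64K) mapping is removed and replaced by the two pieces.
 */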
675 * this is very complex, but the basic idea is to drop all extents
676 * in the range start - end.
679 * If an extent intersects the range but is not entirely inside the range
680 * it is either truncated or split. Anything entirely inside the range
681 * is deleted from the tree.
683 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
684 * to deal with that. We set the field 'bytes_found' of the arguments structure
685 * with the number of allocated bytes found in the target range, so that the
686 * caller can update the inode's number of bytes in an atomic way when
687 * replacing extents in a range to avoid races with stat(2).
689 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
690 struct btrfs_root *root, struct btrfs_inode *inode,
691 struct btrfs_drop_extents_args *args)
693 struct btrfs_fs_info *fs_info = root->fs_info;
694 struct extent_buffer *leaf;
695 struct btrfs_file_extent_item *fi;
696 struct btrfs_ref ref = { 0 };
697 struct btrfs_key key;
698 struct btrfs_key new_key;
699 u64 ino = btrfs_ino(inode);
700 u64 search_start = args->start;
703 u64 extent_offset = 0;
705 u64 last_end = args->start;
711 int modify_tree = -1;
714 int leafs_visited = 0;
715 struct btrfs_path *path = args->path;
717 args->bytes_found = 0;
718 args->extent_inserted = false;
720 /* Must always have a path if ->replace_extent is true */
721 ASSERT(!(args->replace_extent && !args->path));
724 path = btrfs_alloc_path();
731 if (args->drop_cache)
732 btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);
734 if (args->start >= inode->disk_i_size && !args->replace_extent)
737 update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
740 ret = btrfs_lookup_file_extent(trans, root, path, ino,
741 search_start, modify_tree);
744 if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
745 leaf = path->nodes[0];
746 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
747 if (key.objectid == ino &&
748 key.type == BTRFS_EXTENT_DATA_KEY)
754 leaf = path->nodes[0];
755 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
757 ret = btrfs_next_leaf(root, path);
765 leaf = path->nodes[0];
769 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
771 if (key.objectid > ino)
773 if (WARN_ON_ONCE(key.objectid < ino) ||
774 key.type < BTRFS_EXTENT_DATA_KEY) {
779 if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
782 fi = btrfs_item_ptr(leaf, path->slots[0],
783 struct btrfs_file_extent_item);
784 extent_type = btrfs_file_extent_type(leaf, fi);
786 if (extent_type == BTRFS_FILE_EXTENT_REG ||
787 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
788 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
789 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
790 extent_offset = btrfs_file_extent_offset(leaf, fi);
791 extent_end = key.offset +
792 btrfs_file_extent_num_bytes(leaf, fi);
793 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
794 extent_end = key.offset +
795 btrfs_file_extent_ram_bytes(leaf, fi);
802 * Don't skip extent items representing 0 byte lengths. They
803 * used to be created (bug) when, while punching holes, we hit an
804 * -ENOSPC condition. So if we find one here, just ensure we
805 * delete it, otherwise we would insert a new file extent item
806 * with the same key (offset) as that 0 byte length file
807 * extent item in the call to setup_items_for_insert() later in this function.
810 if (extent_end == key.offset && extent_end >= search_start) {
811 last_end = extent_end;
812 goto delete_extent_item;
815 if (extent_end <= search_start) {
821 search_start = max(key.offset, args->start);
822 if (recow || !modify_tree) {
824 btrfs_release_path(path);
829 * | - range to drop - |
830 * | -------- extent -------- |
832 if (args->start > key.offset && args->end < extent_end) {
834 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
839 memcpy(&new_key, &key, sizeof(new_key));
840 new_key.offset = args->start;
841 ret = btrfs_duplicate_item(trans, root, path,
843 if (ret == -EAGAIN) {
844 btrfs_release_path(path);
850 leaf = path->nodes[0];
851 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
852 struct btrfs_file_extent_item);
853 btrfs_set_file_extent_num_bytes(leaf, fi,
854 args->start - key.offset);
856 fi = btrfs_item_ptr(leaf, path->slots[0],
857 struct btrfs_file_extent_item);
859 extent_offset += args->start - key.offset;
860 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
861 btrfs_set_file_extent_num_bytes(leaf, fi,
862 extent_end - args->start);
863 btrfs_mark_buffer_dirty(leaf);
865 if (update_refs && disk_bytenr > 0) {
866 btrfs_init_generic_ref(&ref,
867 BTRFS_ADD_DELAYED_REF,
868 disk_bytenr, num_bytes, 0);
869 btrfs_init_data_ref(&ref,
870 root->root_key.objectid,
872 args->start - extent_offset,
874 ret = btrfs_inc_extent_ref(trans, &ref);
875 BUG_ON(ret); /* -ENOMEM */
877 key.offset = args->start;
880 * From here on out we will have actually dropped something, so
881 * last_end can be updated.
883 last_end = extent_end;
886 * | ---- range to drop ----- |
887 * | -------- extent -------- |
889 if (args->start <= key.offset && args->end < extent_end) {
890 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
895 memcpy(&new_key, &key, sizeof(new_key));
896 new_key.offset = args->end;
897 btrfs_set_item_key_safe(fs_info, path, &new_key);
899 extent_offset += args->end - key.offset;
900 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
901 btrfs_set_file_extent_num_bytes(leaf, fi,
902 extent_end - args->end);
903 btrfs_mark_buffer_dirty(leaf);
904 if (update_refs && disk_bytenr > 0)
905 args->bytes_found += args->end - key.offset;
909 search_start = extent_end;
911 * | ---- range to drop ----- |
912 * | -------- extent -------- |
914 if (args->start > key.offset && args->end >= extent_end) {
916 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
921 btrfs_set_file_extent_num_bytes(leaf, fi,
922 args->start - key.offset);
923 btrfs_mark_buffer_dirty(leaf);
924 if (update_refs && disk_bytenr > 0)
925 args->bytes_found += extent_end - args->start;
926 if (args->end == extent_end)
934 * | ---- range to drop ----- |
935 * | ------ extent ------ |
937 if (args->start <= key.offset && args->end >= extent_end) {
940 del_slot = path->slots[0];
943 BUG_ON(del_slot + del_nr != path->slots[0]);
948 extent_type == BTRFS_FILE_EXTENT_INLINE) {
949 args->bytes_found += extent_end - key.offset;
950 extent_end = ALIGN(extent_end,
951 fs_info->sectorsize);
952 } else if (update_refs && disk_bytenr > 0) {
953 btrfs_init_generic_ref(&ref,
954 BTRFS_DROP_DELAYED_REF,
955 disk_bytenr, num_bytes, 0);
956 btrfs_init_data_ref(&ref,
957 root->root_key.objectid,
959 key.offset - extent_offset, 0,
961 ret = btrfs_free_extent(trans, &ref);
962 BUG_ON(ret); /* -ENOMEM */
963 args->bytes_found += extent_end - key.offset;
966 if (args->end == extent_end)
969 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
974 ret = btrfs_del_items(trans, root, path, del_slot,
977 btrfs_abort_transaction(trans, ret);
984 btrfs_release_path(path);
991 if (!ret && del_nr > 0) {
993 * Set path->slots[0] to first slot, so that after the delete
994 * if items are moved off from our leaf to its immediate left or
995 * right neighbor leaves, we end up with a correct and adjusted
996 * path->slots[0] for our insertion (if args->replace_extent).
998 path->slots[0] = del_slot;
999 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1001 btrfs_abort_transaction(trans, ret);
1004 leaf = path->nodes[0];
1006 * If btrfs_del_items() was called, it might have deleted a leaf, in
1007 * which case it unlocked our path, so check path->locks[0] matches a write lock.
1010 if (!ret && args->replace_extent && leafs_visited == 1 &&
1011 path->locks[0] == BTRFS_WRITE_LOCK &&
1012 btrfs_leaf_free_space(leaf) >=
1013 sizeof(struct btrfs_item) + args->extent_item_size) {
1016 key.type = BTRFS_EXTENT_DATA_KEY;
1017 key.offset = args->start;
1018 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1019 struct btrfs_key slot_key;
1021 btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1022 if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1025 setup_items_for_insert(root, path, &key,
1026 &args->extent_item_size, 1);
1027 args->extent_inserted = true;
1031 btrfs_free_path(path);
1032 else if (!args->extent_inserted)
1033 btrfs_release_path(path);
1035 args->drop_end = found ? min(args->end, last_end) : args->end;
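/*
 * Summary of the four overlap cases handled by btrfs_drop_extents() above:
 *  1) drop range strictly inside the extent: the item is duplicated, the
 *     front piece is shortened and the back piece has its offset/num_bytes
 *     adjusted (an extra ref is taken on the disk extent);
 *  2) drop range covers the front of the extent: the key offset is moved
 *     forward and offset/num_bytes adjusted;
 *  3) drop range covers the tail of the extent: num_bytes is shortened;
 *  4) drop range covers the whole extent: the item is queued for deletion,
 *     dropping the disk extent ref (or accounting the bytes for inline
 *     extents).
 */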
1040 static int extent_mergeable(struct extent_buffer *leaf, int slot,
1041 u64 objectid, u64 bytenr, u64 orig_offset,
1042 u64 *start, u64 *end)
1044 struct btrfs_file_extent_item *fi;
1045 struct btrfs_key key;
1048 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1051 btrfs_item_key_to_cpu(leaf, &key, slot);
1052 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1055 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1056 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1057 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1058 btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1059 btrfs_file_extent_compression(leaf, fi) ||
1060 btrfs_file_extent_encryption(leaf, fi) ||
1061 btrfs_file_extent_other_encoding(leaf, fi))
1064 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1065 if ((*start && *start != key.offset) || (*end && *end != extent_end))
1068 *start = key.offset;
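/*
 * In short, extent_mergeable() above only considers two file extent items
 * mergeable when both are plain REG extents backed by the same disk extent
 * (same bytenr, with offsets that line up with orig_offset), with no
 * compression, encryption or other encoding, and only when the neighbouring
 * item starts or ends exactly at the offset the caller expects.
 */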
1074 * Mark extent in the range start - end as written.
1076 * This changes extent type from 'pre-allocated' to 'regular'. If only
1077 * part of the extent is marked as written, the extent will be split into two or three extents.
1080 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1081 struct btrfs_inode *inode, u64 start, u64 end)
1083 struct btrfs_fs_info *fs_info = trans->fs_info;
1084 struct btrfs_root *root = inode->root;
1085 struct extent_buffer *leaf;
1086 struct btrfs_path *path;
1087 struct btrfs_file_extent_item *fi;
1088 struct btrfs_ref ref = { 0 };
1089 struct btrfs_key key;
1090 struct btrfs_key new_key;
1102 u64 ino = btrfs_ino(inode);
1104 path = btrfs_alloc_path();
1111 key.type = BTRFS_EXTENT_DATA_KEY;
1114 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1117 if (ret > 0 && path->slots[0] > 0)
1120 leaf = path->nodes[0];
1121 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1122 if (key.objectid != ino ||
1123 key.type != BTRFS_EXTENT_DATA_KEY) {
1125 btrfs_abort_transaction(trans, ret);
1128 fi = btrfs_item_ptr(leaf, path->slots[0],
1129 struct btrfs_file_extent_item);
1130 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1132 btrfs_abort_transaction(trans, ret);
1135 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1136 if (key.offset > start || extent_end < end) {
1138 btrfs_abort_transaction(trans, ret);
1142 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1143 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1144 orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1145 memcpy(&new_key, &key, sizeof(new_key));
1147 if (start == key.offset && end < extent_end) {
1150 if (extent_mergeable(leaf, path->slots[0] - 1,
1151 ino, bytenr, orig_offset,
1152 &other_start, &other_end)) {
1153 new_key.offset = end;
1154 btrfs_set_item_key_safe(fs_info, path, &new_key);
1155 fi = btrfs_item_ptr(leaf, path->slots[0],
1156 struct btrfs_file_extent_item);
1157 btrfs_set_file_extent_generation(leaf, fi,
1159 btrfs_set_file_extent_num_bytes(leaf, fi,
1161 btrfs_set_file_extent_offset(leaf, fi,
1163 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1164 struct btrfs_file_extent_item);
1165 btrfs_set_file_extent_generation(leaf, fi,
1167 btrfs_set_file_extent_num_bytes(leaf, fi,
1169 btrfs_mark_buffer_dirty(leaf);
1174 if (start > key.offset && end == extent_end) {
1177 if (extent_mergeable(leaf, path->slots[0] + 1,
1178 ino, bytenr, orig_offset,
1179 &other_start, &other_end)) {
1180 fi = btrfs_item_ptr(leaf, path->slots[0],
1181 struct btrfs_file_extent_item);
1182 btrfs_set_file_extent_num_bytes(leaf, fi,
1183 start - key.offset);
1184 btrfs_set_file_extent_generation(leaf, fi,
1187 new_key.offset = start;
1188 btrfs_set_item_key_safe(fs_info, path, &new_key);
1190 fi = btrfs_item_ptr(leaf, path->slots[0],
1191 struct btrfs_file_extent_item);
1192 btrfs_set_file_extent_generation(leaf, fi,
1194 btrfs_set_file_extent_num_bytes(leaf, fi,
1196 btrfs_set_file_extent_offset(leaf, fi,
1197 start - orig_offset);
1198 btrfs_mark_buffer_dirty(leaf);
1203 while (start > key.offset || end < extent_end) {
1204 if (key.offset == start)
1207 new_key.offset = split;
1208 ret = btrfs_duplicate_item(trans, root, path, &new_key);
1209 if (ret == -EAGAIN) {
1210 btrfs_release_path(path);
1214 btrfs_abort_transaction(trans, ret);
1218 leaf = path->nodes[0];
1219 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1220 struct btrfs_file_extent_item);
1221 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1222 btrfs_set_file_extent_num_bytes(leaf, fi,
1223 split - key.offset);
1225 fi = btrfs_item_ptr(leaf, path->slots[0],
1226 struct btrfs_file_extent_item);
1228 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1229 btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1230 btrfs_set_file_extent_num_bytes(leaf, fi,
1231 extent_end - split);
1232 btrfs_mark_buffer_dirty(leaf);
1234 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1236 btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1237 orig_offset, 0, false);
1238 ret = btrfs_inc_extent_ref(trans, &ref);
1240 btrfs_abort_transaction(trans, ret);
1244 if (split == start) {
1247 if (start != key.offset) {
1249 btrfs_abort_transaction(trans, ret);
1260 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1262 btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
1264 if (extent_mergeable(leaf, path->slots[0] + 1,
1265 ino, bytenr, orig_offset,
1266 &other_start, &other_end)) {
1268 btrfs_release_path(path);
1271 extent_end = other_end;
1272 del_slot = path->slots[0] + 1;
1274 ret = btrfs_free_extent(trans, &ref);
1276 btrfs_abort_transaction(trans, ret);
1282 if (extent_mergeable(leaf, path->slots[0] - 1,
1283 ino, bytenr, orig_offset,
1284 &other_start, &other_end)) {
1286 btrfs_release_path(path);
1289 key.offset = other_start;
1290 del_slot = path->slots[0];
1292 ret = btrfs_free_extent(trans, &ref);
1294 btrfs_abort_transaction(trans, ret);
1299 fi = btrfs_item_ptr(leaf, path->slots[0],
1300 struct btrfs_file_extent_item);
1301 btrfs_set_file_extent_type(leaf, fi,
1302 BTRFS_FILE_EXTENT_REG);
1303 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1304 btrfs_mark_buffer_dirty(leaf);
1306 fi = btrfs_item_ptr(leaf, del_slot - 1,
1307 struct btrfs_file_extent_item);
1308 btrfs_set_file_extent_type(leaf, fi,
1309 BTRFS_FILE_EXTENT_REG);
1310 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1311 btrfs_set_file_extent_num_bytes(leaf, fi,
1312 extent_end - key.offset);
1313 btrfs_mark_buffer_dirty(leaf);
1315 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1317 btrfs_abort_transaction(trans, ret);
1322 btrfs_free_path(path);
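/*
 * In short: in btrfs_mark_extent_written() above, a prealloc extent that is
 * only partially written gets split at the written boundary with
 * btrfs_duplicate_item() (taking an extra ref on the disk extent for the new
 * item), the written piece is flipped to BTRFS_FILE_EXTENT_REG, and if it is
 * now adjacent to a REG extent backed by the same disk extent the two are
 * merged back and the redundant item deleted (dropping the extra ref again).
 */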
1327 * on error we return an unlocked page and the error value;
1328 * on success we return a locked page and 0
1330 static int prepare_uptodate_page(struct inode *inode,
1331 struct page *page, u64 pos,
1332 bool force_uptodate)
1336 if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1337 !PageUptodate(page)) {
1338 ret = btrfs_readpage(NULL, page);
1342 if (!PageUptodate(page)) {
1348 * Since btrfs_readpage() will unlock the page before it
1349 * returns, there is a window where btrfs_releasepage() can be
1350 * called to release the page. Here we check both inode
1351 * mapping and PagePrivate() to make sure the page was not released.
1354 * The private flag check is essential for subpage as we need
1355 * to store extra bitmap using page->private.
1357 if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
1366 * this just gets pages into the page cache and locks them down.
1368 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1369 size_t num_pages, loff_t pos,
1370 size_t write_bytes, bool force_uptodate)
1373 unsigned long index = pos >> PAGE_SHIFT;
1374 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1378 for (i = 0; i < num_pages; i++) {
1380 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1381 mask | __GFP_WRITE);
1388 err = set_page_extent_mapped(pages[i]);
1395 err = prepare_uptodate_page(inode, pages[i], pos,
1397 if (!err && i == num_pages - 1)
1398 err = prepare_uptodate_page(inode, pages[i],
1399 pos + write_bytes, false);
1402 if (err == -EAGAIN) {
1409 wait_on_page_writeback(pages[i]);
1414 while (faili >= 0) {
1415 unlock_page(pages[faili]);
1416 put_page(pages[faili]);
1424 * This function locks the extent and properly waits for data=ordered extents
1425 * to finish before allowing the pages to be modified if needed.
1428 * 1 - the extent is locked
1429 * 0 - the extent is not locked, and everything is OK
1430 * -EAGAIN - need to re-prepare the pages
1431 * any other negative number - something went wrong
1434 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1435 size_t num_pages, loff_t pos,
1437 u64 *lockstart, u64 *lockend,
1438 struct extent_state **cached_state)
1440 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1446 start_pos = round_down(pos, fs_info->sectorsize);
1447 last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
1449 if (start_pos < inode->vfs_inode.i_size) {
1450 struct btrfs_ordered_extent *ordered;
1452 lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1454 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1455 last_pos - start_pos + 1);
1457 ordered->file_offset + ordered->num_bytes > start_pos &&
1458 ordered->file_offset <= last_pos) {
1459 unlock_extent_cached(&inode->io_tree, start_pos,
1460 last_pos, cached_state);
1461 for (i = 0; i < num_pages; i++) {
1462 unlock_page(pages[i]);
1465 btrfs_start_ordered_extent(ordered, 1);
1466 btrfs_put_ordered_extent(ordered);
1470 btrfs_put_ordered_extent(ordered);
1472 *lockstart = start_pos;
1473 *lockend = last_pos;
1478 * We should be called after prepare_pages() which should have locked
1479 * all pages in the range.
1481 for (i = 0; i < num_pages; i++)
1482 WARN_ON(!PageLocked(pages[i]));
1487 static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1488 size_t *write_bytes, bool nowait)
1490 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1491 struct btrfs_root *root = inode->root;
1492 u64 lockstart, lockend;
1496 if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1499 if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
1502 lockstart = round_down(pos, fs_info->sectorsize);
1503 lockend = round_up(pos + *write_bytes,
1504 fs_info->sectorsize) - 1;
1505 num_bytes = lockend - lockstart + 1;
1508 struct btrfs_ordered_extent *ordered;
1510 if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
1513 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1516 btrfs_put_ordered_extent(ordered);
1521 btrfs_lock_and_flush_ordered_range(inode, lockstart,
1525 ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1526 NULL, NULL, NULL, false);
1530 btrfs_drew_write_unlock(&root->snapshot_lock);
1532 *write_bytes = min_t(size_t, *write_bytes,
1533 num_bytes - pos + lockstart);
1536 unlock_extent(&inode->io_tree, lockstart, lockend);
1541 static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
1542 size_t *write_bytes)
1544 return check_can_nocow(inode, pos, write_bytes, true);
1548 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1551 * @write_bytes: The length to write, will be updated to the nocow writeable range
1554 * This function will flush ordered extents in the range to ensure proper nocow checks for the (nowait == false) case.
1558 * >0 and update @write_bytes if we can do nocow write
1559 * 0 if we can't do nocow write
1560 * -EAGAIN if we can't get the needed lock or there are ordered extents
1561 * for the (nowait == true) case
1562 * <0 if other error happened
1564 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
1566 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1567 size_t *write_bytes)
1569 return check_can_nocow(inode, pos, write_bytes, false);
1572 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1574 btrfs_drew_write_unlock(&inode->root->snapshot_lock);
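/*
 * Illustrative sketch (not built): the intended pairing of the two helpers
 * above, mirroring how the buffered write path below uses them. On success,
 * write_bytes may have been shrunk to the nocow-writeable length.
 */
#if 0
static void example_nocow_write(struct btrfs_inode *inode, loff_t pos,
				size_t write_bytes)
{
	if (btrfs_check_nocow_lock(inode, pos, &write_bytes) > 0) {
		/* ... write up to write_bytes in place, without COW ... */
		btrfs_check_nocow_unlock(inode);
	}
}
#endif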
1577 static void update_time_for_write(struct inode *inode)
1579 struct timespec64 now;
1581 if (IS_NOCMTIME(inode))
1584 now = current_time(inode);
1585 if (!timespec64_equal(&inode->i_mtime, &now))
1586 inode->i_mtime = now;
1588 if (!timespec64_equal(&inode->i_ctime, &now))
1589 inode->i_ctime = now;
1591 if (IS_I_VERSION(inode))
1592 inode_inc_iversion(inode);
1595 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1598 struct file *file = iocb->ki_filp;
1599 struct inode *inode = file_inode(file);
1600 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1601 loff_t pos = iocb->ki_pos;
1606 if (iocb->ki_flags & IOCB_NOWAIT) {
1607 size_t nocow_bytes = count;
1609 /* We will allocate space in case nodatacow is not set, so bail */
1610 if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
1613 * There are holes in the range or parts of the range that must
1614 * be COWed (shared extents, RO block groups, etc), so just bail out.
1617 if (nocow_bytes < count)
1621 current->backing_dev_info = inode_to_bdi(inode);
1622 ret = file_remove_privs(file);
1627 * We reserve space for updating the inode when we reserve space for the
1628 * extent we are going to write, so we will enospc out there. We don't
1629 * need to start yet another transaction to update the inode as we will
1630 * update the inode when we finish writing whatever data we write.
1632 update_time_for_write(inode);
1634 start_pos = round_down(pos, fs_info->sectorsize);
1635 oldsize = i_size_read(inode);
1636 if (start_pos > oldsize) {
1637 /* Expand hole size to cover write data, preventing empty gap */
1638 loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1640 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1642 current->backing_dev_info = NULL;
1650 static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1653 struct file *file = iocb->ki_filp;
1655 struct inode *inode = file_inode(file);
1656 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1657 struct page **pages = NULL;
1658 struct extent_changeset *data_reserved = NULL;
1659 u64 release_bytes = 0;
1662 size_t num_written = 0;
1665 bool only_release_metadata = false;
1666 bool force_page_uptodate = false;
1667 loff_t old_isize = i_size_read(inode);
1668 unsigned int ilock_flags = 0;
1670 if (iocb->ki_flags & IOCB_NOWAIT)
1671 ilock_flags |= BTRFS_ILOCK_TRY;
1673 ret = btrfs_inode_lock(inode, ilock_flags);
1677 ret = generic_write_checks(iocb, i);
1681 ret = btrfs_write_check(iocb, i, ret);
1686 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1687 PAGE_SIZE / (sizeof(struct page *)));
1688 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1689 nrptrs = max(nrptrs, 8);
1690 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1696 while (iov_iter_count(i) > 0) {
1697 struct extent_state *cached_state = NULL;
1698 size_t offset = offset_in_page(pos);
1699 size_t sector_offset;
1700 size_t write_bytes = min(iov_iter_count(i),
1701 nrptrs * (size_t)PAGE_SIZE -
1704 size_t reserve_bytes;
1707 size_t dirty_sectors;
1712 * Fault pages before locking them in prepare_pages
1713 * to avoid recursive lock
1715 if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1720 only_release_metadata = false;
1721 sector_offset = pos & (fs_info->sectorsize - 1);
1723 extent_changeset_release(data_reserved);
1724 ret = btrfs_check_data_free_space(BTRFS_I(inode),
1725 &data_reserved, pos,
1729 * If we don't have to COW at the offset, reserve
1730 * metadata only. write_bytes may get smaller than requested here.
1733 if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1735 only_release_metadata = true;
1740 num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1741 WARN_ON(num_pages > nrptrs);
1742 reserve_bytes = round_up(write_bytes + sector_offset,
1743 fs_info->sectorsize);
1744 WARN_ON(reserve_bytes == 0);
1745 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1748 if (!only_release_metadata)
1749 btrfs_free_reserved_data_space(BTRFS_I(inode),
1753 btrfs_check_nocow_unlock(BTRFS_I(inode));
1757 release_bytes = reserve_bytes;
1760 * This is going to set up the pages array with the number of
1761 * pages we want, so we don't really need to worry about the
1762 * contents of pages from loop to loop
1764 ret = prepare_pages(inode, pages, num_pages,
1766 force_page_uptodate);
1768 btrfs_delalloc_release_extents(BTRFS_I(inode),
1773 extents_locked = lock_and_cleanup_extent_if_need(
1774 BTRFS_I(inode), pages,
1775 num_pages, pos, write_bytes, &lockstart,
1776 &lockend, &cached_state);
1777 if (extents_locked < 0) {
1778 if (extents_locked == -EAGAIN)
1780 btrfs_delalloc_release_extents(BTRFS_I(inode),
1782 ret = extents_locked;
1786 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1788 num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1789 dirty_sectors = round_up(copied + sector_offset,
1790 fs_info->sectorsize);
1791 dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1794 * if we have trouble faulting in the pages, fall
1795 * back to one page at a time
1797 if (copied < write_bytes)
1801 force_page_uptodate = true;
1805 force_page_uptodate = false;
1806 dirty_pages = DIV_ROUND_UP(copied + offset,
1810 if (num_sectors > dirty_sectors) {
1811 /* release everything except the sectors we dirtied */
1812 release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1813 if (only_release_metadata) {
1814 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1815 release_bytes, true);
1819 __pos = round_down(pos,
1820 fs_info->sectorsize) +
1821 (dirty_pages << PAGE_SHIFT);
1822 btrfs_delalloc_release_space(BTRFS_I(inode),
1823 data_reserved, __pos,
1824 release_bytes, true);
1828 release_bytes = round_up(copied + sector_offset,
1829 fs_info->sectorsize);
1831 ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1832 dirty_pages, pos, copied,
1833 &cached_state, only_release_metadata);
1836 * If we have not locked the extent range, because the range's
1837 * start offset is >= i_size, we might still have a non-NULL
1838 * cached extent state, acquired while marking the extent range
1839 * as delalloc through btrfs_dirty_pages(). Therefore free any
1840 * possible cached extent state to avoid a memory leak.
1843 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1844 lockstart, lockend, &cached_state);
1846 free_extent_state(cached_state);
1848 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1850 btrfs_drop_pages(pages, num_pages);
1855 if (only_release_metadata)
1856 btrfs_check_nocow_unlock(BTRFS_I(inode));
1858 btrfs_drop_pages(pages, num_pages);
1862 balance_dirty_pages_ratelimited(inode->i_mapping);
1865 num_written += copied;
1870 if (release_bytes) {
1871 if (only_release_metadata) {
1872 btrfs_check_nocow_unlock(BTRFS_I(inode));
1873 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1874 release_bytes, true);
1876 btrfs_delalloc_release_space(BTRFS_I(inode),
1878 round_down(pos, fs_info->sectorsize),
1879 release_bytes, true);
1883 extent_changeset_free(data_reserved);
1884 if (num_written > 0) {
1885 pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1886 iocb->ki_pos += num_written;
1889 btrfs_inode_unlock(inode, ilock_flags);
1890 return num_written ? num_written : ret;
1893 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1894 const struct iov_iter *iter, loff_t offset)
1896 const u32 blocksize_mask = fs_info->sectorsize - 1;
1898 if (offset & blocksize_mask)
1901 if (iov_iter_alignment(iter) & blocksize_mask)
1907 static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1909 const bool is_sync_write = (iocb->ki_flags & IOCB_DSYNC);
1910 struct file *file = iocb->ki_filp;
1911 struct inode *inode = file_inode(file);
1912 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1914 ssize_t written = 0;
1915 ssize_t written_buffered;
1916 size_t prev_left = 0;
1919 unsigned int ilock_flags = 0;
1921 if (iocb->ki_flags & IOCB_NOWAIT)
1922 ilock_flags |= BTRFS_ILOCK_TRY;
1924 /* If the write DIO is within EOF, use a shared lock */
1925 if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1926 ilock_flags |= BTRFS_ILOCK_SHARED;
1929 err = btrfs_inode_lock(inode, ilock_flags);
1933 err = generic_write_checks(iocb, from);
1935 btrfs_inode_unlock(inode, ilock_flags);
1939 err = btrfs_write_check(iocb, from, err);
1941 btrfs_inode_unlock(inode, ilock_flags);
1947 * Re-check since file size may have changed just before taking the
1948 * lock or pos may have changed because of O_APPEND in generic_write_checks()
1950 if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1951 pos + iov_iter_count(from) > i_size_read(inode)) {
1952 btrfs_inode_unlock(inode, ilock_flags);
1953 ilock_flags &= ~BTRFS_ILOCK_SHARED;
1957 if (check_direct_IO(fs_info, from, pos)) {
1958 btrfs_inode_unlock(inode, ilock_flags);
1963 * We remove IOCB_DSYNC so that we don't deadlock when iomap_dio_rw()
1964 * calls generic_write_sync() (through iomap_dio_complete()), because
1965 * that results in calling fsync (btrfs_sync_file()) which will try to
1966 * lock the inode in exclusive/write mode.
1969 iocb->ki_flags &= ~IOCB_DSYNC;
1972 * The iov_iter can be mapped to the same file range we are writing to.
1973 * If that's the case, then we will deadlock in the iomap code, because
1974 * it first calls our callback btrfs_dio_iomap_begin(), which will create
1975 * an ordered extent, and after that it will fault in the pages that the
1976 * iov_iter refers to. During the fault in we end up in the readahead
1977 * pages code (starting at btrfs_readahead()), which will lock the range,
1978 * find that ordered extent and then wait for it to complete (at
1979 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
1980 * obviously the ordered extent can never complete as we didn't submit
1981 * yet the respective bio(s). This always happens when the buffer is
1982 * memory mapped to the same file range, since the iomap DIO code always
1983 * invalidates pages in the target file range (after starting and waiting
1984 * for any writeback).
1986 * So here we disable page faults in the iov_iter and then retry if we
1987 * got -EFAULT, faulting in the pages before the retry.
1990 from->nofault = true;
1991 err = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
1992 IOMAP_DIO_PARTIAL, written);
1993 from->nofault = false;
1995 /* No increment (+=) because iomap returns a cumulative value. */
1999 if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
2000 const size_t left = iov_iter_count(from);
2002 * We have more data left to write. Try to fault in as many as
2003 * possible of the remainder pages and retry. We do this without
2004 * releasing and locking again the inode, to prevent races with
2007 * Also, in case the iov refers to pages in the file range of the
2008 * file we want to write to (due to a mmap), we could enter an
2009 * infinite loop if we retry after faulting the pages in, since
2010 * iomap will invalidate any pages in the range early on, before
2011 * it tries to fault in the pages of the iov. So we keep track of
2012 * how much was left of iov in the previous EFAULT and fall back
2013 * to buffered IO in case we haven't made any progress.
2015 if (left == prev_left) {
2018 fault_in_iov_iter_readable(from, left);
2024 btrfs_inode_unlock(inode, ilock_flags);
2027 * Add back IOCB_DSYNC. Our caller, btrfs_file_write_iter(), will do
2028 * the fsync (call generic_write_sync()).
2031 iocb->ki_flags |= IOCB_DSYNC;
2033 /* If 'err' is -ENOTBLK then it means we must fall back to buffered IO. */
2034 if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
2039 written_buffered = btrfs_buffered_write(iocb, from);
2040 if (written_buffered < 0) {
2041 err = written_buffered;
2045 * Ensure all data is persisted. We want the next direct IO read to be
2046 * able to read what was just written.
2048 endbyte = pos + written_buffered - 1;
2049 err = btrfs_fdatawrite_range(inode, pos, endbyte);
2052 err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
2055 written += written_buffered;
2056 iocb->ki_pos = pos + written_buffered;
2057 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
2058 endbyte >> PAGE_SHIFT);
2060 return err < 0 ? err : written;
2063 static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
2064 struct iov_iter *from)
2066 struct file *file = iocb->ki_filp;
2067 struct btrfs_inode *inode = BTRFS_I(file_inode(file));
2068 ssize_t num_written = 0;
2069 const bool sync = iocb->ki_flags & IOCB_DSYNC;
2072 * If the fs flips readonly due to some impossible error, although we
2073 * have opened a file as writable, we have to stop this write operation
2074 * to ensure consistency.
2076 if (test_bit(BTRFS_FS_STATE_ERROR, &inode->root->fs_info->fs_state))
2079 if (!(iocb->ki_flags & IOCB_DIRECT) &&
2080 (iocb->ki_flags & IOCB_NOWAIT))
2084 atomic_inc(&inode->sync_writers);
2086 if (iocb->ki_flags & IOCB_DIRECT)
2087 num_written = btrfs_direct_write(iocb, from);
2089 num_written = btrfs_buffered_write(iocb, from);
2091 btrfs_set_inode_last_sub_trans(inode);
2093 if (num_written > 0)
2094 num_written = generic_write_sync(iocb, num_written);
2097 atomic_dec(&inode->sync_writers);
2099 current->backing_dev_info = NULL;
2103 int btrfs_release_file(struct inode *inode, struct file *filp)
2105 struct btrfs_file_private *private = filp->private_data;
2107 if (private && private->filldir_buf)
2108 kfree(private->filldir_buf);
2110 filp->private_data = NULL;
2113 * Set by setattr when we are about to truncate a file from a non-zero
2114 * size to a zero size. This tries to flush down new bytes that may
2115 * have been written if the application were using truncate to replace a file in place.
2118 if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
2119 &BTRFS_I(inode)->runtime_flags))
2120 filemap_flush(inode->i_mapping);
2124 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2127 struct blk_plug plug;
2130 * This is only called in fsync, which would do synchronous writes, so
2131 * a plug can merge adjacent IOs as much as possible. Especially in the case
2132 * of multiple disks using a raid profile, a large IO can be split into
2133 * several segments of stripe length (currently 64K).
2135 blk_start_plug(&plug);
2136 atomic_inc(&BTRFS_I(inode)->sync_writers);
2137 ret = btrfs_fdatawrite_range(inode, start, end);
2138 atomic_dec(&BTRFS_I(inode)->sync_writers);
2139 blk_finish_plug(&plug);
2144 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
2146 struct btrfs_inode *inode = BTRFS_I(ctx->inode);
2147 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2149 if (btrfs_inode_in_log(inode, fs_info->generation) &&
2150 list_empty(&ctx->ordered_extents))
2154 * If we are doing a fast fsync we cannot bail out if the inode's
2155 * last_trans is <= the last committed transaction, because we only
2156 * update the last_trans of the inode during ordered extent completion,
2157 * and for a fast fsync we don't wait for that, we only wait for the
2158 * writeback to complete.
2160 if (inode->last_trans <= fs_info->last_trans_committed &&
2161 (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
2162 list_empty(&ctx->ordered_extents)))
2169 * fsync call for both files and directories. This logs the inode into
2170 * the tree log instead of forcing full commits whenever possible.
2172 * It needs to call filemap_fdatawait so that all ordered extent updates
2173 * in the metadata btree are up to date for copying to the log.
2175 * It drops the inode mutex before doing the tree log commit. This is an
2176 * important optimization for directories because holding the mutex prevents
2177 * new operations on the dir while we write to disk.
2179 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2181 struct dentry *dentry = file_dentry(file);
2182 struct inode *inode = d_inode(dentry);
2183 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2184 struct btrfs_root *root = BTRFS_I(inode)->root;
2185 struct btrfs_trans_handle *trans;
2186 struct btrfs_log_ctx ctx;
2191 trace_btrfs_sync_file(file, datasync);
2193 btrfs_init_log_ctx(&ctx, inode);
2196 * Always set the range to a full range, otherwise we can get into
2197 * several problems, from missing file extent items to represent holes
2198 * when not using the NO_HOLES feature, to log tree corruption due to
2199 * races between hole detection during logging and completion of ordered
2200 * extents outside the range, to missing checksums due to ordered extents
2201 * for which we flushed only a subset of their pages.
2205 len = (u64)LLONG_MAX + 1;
2208 * We write the dirty pages in the range and wait until they complete
2209 * outside of the ->i_mutex. That way the dirty pages can be flushed by
2210 * multiple tasks, which improves performance. See
2211 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2213 ret = start_ordered_ops(inode, start, end);
2217 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2219 atomic_inc(&root->log_batch);
2222 * Always check for the full sync flag while holding the inode's lock,
2223 * to avoid races with other tasks. The flag must be either set all the
2224 * time during logging or off all the time while logging.
2226 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2227 &BTRFS_I(inode)->runtime_flags);
2230 * Before we acquired the inode's lock and the mmap lock, someone may
2231 * have dirtied more pages in the target range. We need to make sure
2232 * that writeback for any such pages does not start while we are logging
2233 * the inode, because if it does, any of the following might happen when
2234 * we are not doing a full inode sync:
2236 * 1) We log an extent after its writeback finishes but before its
2237 * checksums are added to the csum tree, leading to -EIO errors
2238 * when attempting to read the extent after a log replay.
2240 * 2) We can end up logging an extent before its writeback finishes.
2241 * Therefore after the log replay we will have a file extent item
2242 * pointing to an unwritten extent (and no data checksums as well).
2244 * So trigger writeback for any eventual new dirty pages and then we
2245 * wait for all ordered extents to complete below.
2247 ret = start_ordered_ops(inode, start, end);
2249 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2254 * We have to do this here to avoid the priority inversion of waiting on
2255 * IO of a lower priority task while holding a transaction open.
2257 * For a full fsync we wait for the ordered extents to complete while
2258 * for a fast fsync we wait just for writeback to complete, and then
2259 * attach the ordered extents to the transaction so that a transaction
2260 * commit waits for their completion, to avoid data loss if we fsync,
2261 * the current transaction commits before the ordered extents complete
2262 * and a power failure happens right after that.
2264 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2265 * logical address recorded in the ordered extent may change. We need
2266 * to wait for the IO to stabilize the logical address.
2268 if (full_sync || btrfs_is_zoned(fs_info)) {
2269 ret = btrfs_wait_ordered_range(inode, start, len);
2272 * Get our ordered extents as soon as possible to avoid doing
2273 * checksum lookups in the csum tree, and use instead the
2274 * checksums attached to the ordered extents.
2276 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2277 &ctx.ordered_extents);
2278 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2282 goto out_release_extents;
2284 atomic_inc(&root->log_batch);
2287 if (skip_inode_logging(&ctx)) {
2289 * We've had everything committed since the last time we were
2290 * modified so clear this flag in case it was set for whatever
2291 * reason, it's no longer relevant.
2293 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2294 &BTRFS_I(inode)->runtime_flags);
2296 * An ordered extent might have started before and completed
2297 * already with io errors, in which case the inode was not
2298 * updated and we end up here. So check the inode's mapping
2299 * for any errors that might have happened since the last time we
2300 * called fsync.
2302 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2303 goto out_release_extents;
2307 * We use start here because we will need to wait on the IO to complete
2308 * in btrfs_sync_log, which could require joining a transaction (for
2309 * example checking cross references in the nocow path). If we use join
2310 * here we could get into a situation where we're waiting on IO to
2311 * happen that is blocked on a transaction trying to commit. With start
2312 * we inc the extwriter counter, so we wait for all extwriters to exit
2313 * before we start blocking joiners. This comment is to keep somebody
2314 * from thinking they are super smart and changing this to
2315 * btrfs_join_transaction *cough*Josef*cough*.
2317 trans = btrfs_start_transaction(root, 0);
2318 if (IS_ERR(trans)) {
2319 ret = PTR_ERR(trans);
2320 goto out_release_extents;
2322 trans->in_fsync = true;
2324 ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2325 btrfs_release_log_ctx_extents(&ctx);
2327 /* Fallthrough and commit/free transaction. */
2331 /* we've logged all the items and now have a consistent
2332 * version of the file in the log. It is possible that
2333 * someone will come in and modify the file, but that's
2334 * fine because the log is consistent on disk, and we
2335 * have references to all of the file's extents
2337 * It is possible that someone will come in and log the
2338 * file again, but that will end up using the synchronization
2339 * inside btrfs_sync_log to keep things safe.
2341 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2343 if (ret == BTRFS_NO_LOG_SYNC) {
2344 ret = btrfs_end_transaction(trans);
2348 /* We successfully logged the inode, attempt to sync the log. */
2350 ret = btrfs_sync_log(trans, root, &ctx);
2352 ret = btrfs_end_transaction(trans);
2358 * At this point we need to commit the transaction because we had
2359 * btrfs_need_log_full_commit() or some other error.
2361 * If we didn't do a full sync we have to stop the trans handle, wait on
2362 * the ordered extents, start it again and commit the transaction. If
2363 * we attempt to wait on the ordered extents here we could deadlock with
2364 * something like fallocate() that is holding the extent lock trying to
2365 * start a transaction while some other thread is trying to commit the
2366 * transaction while we (fsync) are currently holding the transaction
2370 ret = btrfs_end_transaction(trans);
2373 ret = btrfs_wait_ordered_range(inode, start, len);
2378 * This is safe to use here because we're only interested in
2379 * making sure the transaction that had the ordered extents is
2380 * committed. We aren't waiting on anything past this point,
2381 * we're purely getting the transaction and committing it.
2383 trans = btrfs_attach_transaction_barrier(root);
2384 if (IS_ERR(trans)) {
2385 ret = PTR_ERR(trans);
2388 * We committed the transaction and there's no currently
2389 * running transaction; this means everything we care
2390 * about made it to disk and we are done.
2398 ret = btrfs_commit_transaction(trans);
2400 ASSERT(list_empty(&ctx.list));
2401 err = file_check_and_advance_wb_err(file);
2404 return ret > 0 ? -EIO : ret;
2406 out_release_extents:
2407 btrfs_release_log_ctx_extents(&ctx);
2408 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
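/*
 * Illustrative userspace sketch (not part of this file): the fsync path
 * above is what fsync(2)/fdatasync(2) end up in, via the .fsync hook of
 * btrfs_file_operations. The file path below is an assumption for the
 * example; any file on a btrfs mount would do.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int example_fsync(void)
{
	int fd = open("/mnt/btrfs/data.bin", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return -1;
	if (write(fd, "hello", 5) != 5 ||
	    fdatasync(fd) != 0) {	/* ends up in btrfs_sync_file() */
		perror("fdatasync");
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif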
2412 static const struct vm_operations_struct btrfs_file_vm_ops = {
2413 .fault = filemap_fault,
2414 .map_pages = filemap_map_pages,
2415 .page_mkwrite = btrfs_page_mkwrite,
2418 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2420 struct address_space *mapping = filp->f_mapping;
2422 if (!mapping->a_ops->readpage)
2425 file_accessed(filp);
2426 vma->vm_ops = &btrfs_file_vm_ops;
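/*
 * Illustrative userspace sketch (not part of this file): a shared writable
 * mapping dirtied through the mapping reaches btrfs_page_mkwrite() via the
 * vm_ops installed above. Path and sizes are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int example_mmap_write(void)
{
	int fd = open("/mnt/btrfs/data.bin", O_RDWR);
	char *p;

	if (fd < 0)
		return -1;
	if (ftruncate(fd, 4096) != 0)
		goto err;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		goto err;
	memcpy(p, "dirty", 5);		/* first write faults -> page_mkwrite */
	msync(p, 4096, MS_SYNC);
	munmap(p, 4096);
	return close(fd);
err:
	close(fd);
	return -1;
}
#endif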
2431 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2432 int slot, u64 start, u64 end)
2434 struct btrfs_file_extent_item *fi;
2435 struct btrfs_key key;
2437 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2440 btrfs_item_key_to_cpu(leaf, &key, slot);
2441 if (key.objectid != btrfs_ino(inode) ||
2442 key.type != BTRFS_EXTENT_DATA_KEY)
2445 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2447 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2450 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2453 if (key.offset == end)
2455 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2460 static int fill_holes(struct btrfs_trans_handle *trans,
2461 struct btrfs_inode *inode,
2462 struct btrfs_path *path, u64 offset, u64 end)
2464 struct btrfs_fs_info *fs_info = trans->fs_info;
2465 struct btrfs_root *root = inode->root;
2466 struct extent_buffer *leaf;
2467 struct btrfs_file_extent_item *fi;
2468 struct extent_map *hole_em;
2469 struct extent_map_tree *em_tree = &inode->extent_tree;
2470 struct btrfs_key key;
2473 if (btrfs_fs_incompat(fs_info, NO_HOLES))
2476 key.objectid = btrfs_ino(inode);
2477 key.type = BTRFS_EXTENT_DATA_KEY;
2478 key.offset = offset;
2480 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2483 * We should have dropped this offset, so if we find it then
2484 * something has gone horribly wrong.
2491 leaf = path->nodes[0];
2492 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2496 fi = btrfs_item_ptr(leaf, path->slots[0],
2497 struct btrfs_file_extent_item);
2498 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2500 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2501 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2502 btrfs_set_file_extent_offset(leaf, fi, 0);
2503 btrfs_mark_buffer_dirty(leaf);
2507 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2510 key.offset = offset;
2511 btrfs_set_item_key_safe(fs_info, path, &key);
2512 fi = btrfs_item_ptr(leaf, path->slots[0],
2513 struct btrfs_file_extent_item);
2514 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2516 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2517 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2518 btrfs_set_file_extent_offset(leaf, fi, 0);
2519 btrfs_mark_buffer_dirty(leaf);
2522 btrfs_release_path(path);
2524 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2525 offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2530 btrfs_release_path(path);
2532 hole_em = alloc_extent_map();
2534 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2535 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2537 hole_em->start = offset;
2538 hole_em->len = end - offset;
2539 hole_em->ram_bytes = hole_em->len;
2540 hole_em->orig_start = offset;
2542 hole_em->block_start = EXTENT_MAP_HOLE;
2543 hole_em->block_len = 0;
2544 hole_em->orig_block_len = 0;
2545 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2546 hole_em->generation = trans->transid;
2549 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2550 write_lock(&em_tree->lock);
2551 ret = add_extent_mapping(em_tree, hole_em, 1);
2552 write_unlock(&em_tree->lock);
2553 } while (ret == -EEXIST);
2554 free_extent_map(hole_em);
2556 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2557 &inode->runtime_flags);
2564 * Find a hole extent on the given inode and change start/len to the end of
2565 * the hole extent (a hole/vacuum extent whose em->start <= start &&
2566 * em->start + em->len > start).
2567 * When a hole extent is found, return 1 and modify start/len.
2569 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2571 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2572 struct extent_map *em;
2575 em = btrfs_get_extent(inode, NULL, 0,
2576 round_down(*start, fs_info->sectorsize),
2577 round_up(*len, fs_info->sectorsize));
2581 /* Hole or vacuum extent (only exists in no-hole mode) */
2582 if (em->block_start == EXTENT_MAP_HOLE) {
2584 *len = em->start + em->len > *start + *len ?
2585 0 : *start + *len - em->start - em->len;
2586 *start = em->start + em->len;
2588 free_extent_map(em);
2592 static int btrfs_punch_hole_lock_range(struct inode *inode,
2593 const u64 lockstart,
2595 struct extent_state **cached_state)
2598 * For the subpage case, if the range is not at a page boundary, we could
2599 * have pages at the leading/trailing parts of the range.
2600 * This could lead to an infinite loop since filemap_range_has_page()
2601 * will always return true.
2602 * So here we need to do extra page alignment for
2603 * filemap_range_has_page().
2605 const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2606 const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2609 struct btrfs_ordered_extent *ordered;
2612 truncate_pagecache_range(inode, lockstart, lockend);
2614 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2616 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
2620 * We need to make sure we have no ordered extents in this range
2621 * and nobody raced in and read a page in this range; if either
2622 * happened, we need to try again.
2625 (ordered->file_offset + ordered->num_bytes <= lockstart ||
2626 ordered->file_offset > lockend)) &&
2627 !filemap_range_has_page(inode->i_mapping,
2628 page_lockstart, page_lockend)) {
2630 btrfs_put_ordered_extent(ordered);
2634 btrfs_put_ordered_extent(ordered);
2635 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2636 lockend, cached_state);
2637 ret = btrfs_wait_ordered_range(inode, lockstart,
2638 lockend - lockstart + 1);
2645 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2646 struct btrfs_inode *inode,
2647 struct btrfs_path *path,
2648 struct btrfs_replace_extent_info *extent_info,
2649 const u64 replace_len,
2650 const u64 bytes_to_drop)
2652 struct btrfs_fs_info *fs_info = trans->fs_info;
2653 struct btrfs_root *root = inode->root;
2654 struct btrfs_file_extent_item *extent;
2655 struct extent_buffer *leaf;
2656 struct btrfs_key key;
2658 struct btrfs_ref ref = { 0 };
2661 if (replace_len == 0)
2664 if (extent_info->disk_offset == 0 &&
2665 btrfs_fs_incompat(fs_info, NO_HOLES)) {
2666 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2670 key.objectid = btrfs_ino(inode);
2671 key.type = BTRFS_EXTENT_DATA_KEY;
2672 key.offset = extent_info->file_offset;
2673 ret = btrfs_insert_empty_item(trans, root, path, &key,
2674 sizeof(struct btrfs_file_extent_item));
2677 leaf = path->nodes[0];
2678 slot = path->slots[0];
2679 write_extent_buffer(leaf, extent_info->extent_buf,
2680 btrfs_item_ptr_offset(leaf, slot),
2681 sizeof(struct btrfs_file_extent_item));
2682 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2683 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2684 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2685 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2686 if (extent_info->is_new_extent)
2687 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2688 btrfs_mark_buffer_dirty(leaf);
2689 btrfs_release_path(path);
2691 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2696 /* If it's a hole, nothing more needs to be done. */
2697 if (extent_info->disk_offset == 0) {
2698 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2702 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2704 if (extent_info->is_new_extent && extent_info->insertions == 0) {
2705 key.objectid = extent_info->disk_offset;
2706 key.type = BTRFS_EXTENT_ITEM_KEY;
2707 key.offset = extent_info->disk_len;
2708 ret = btrfs_alloc_reserved_file_extent(trans, root,
2710 extent_info->file_offset,
2711 extent_info->qgroup_reserved,
2716 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2717 extent_info->disk_offset,
2718 extent_info->disk_len, 0);
2719 ref_offset = extent_info->file_offset - extent_info->data_offset;
2720 btrfs_init_data_ref(&ref, root->root_key.objectid,
2721 btrfs_ino(inode), ref_offset, 0, false);
2722 ret = btrfs_inc_extent_ref(trans, &ref);
2725 extent_info->insertions++;
2731 * The respective range must have been previously locked, as well as the inode.
2732 * The end offset is inclusive (last byte of the range).
2733 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2734 * the file range with an extent.
2735 * When not punching a hole, we don't want to end up in a state where we dropped
2736 * extents without inserting a new one, so we must abort the transaction to avoid
2739 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2740 struct btrfs_path *path, const u64 start,
2742 struct btrfs_replace_extent_info *extent_info,
2743 struct btrfs_trans_handle **trans_out)
2745 struct btrfs_drop_extents_args drop_args = { 0 };
2746 struct btrfs_root *root = inode->root;
2747 struct btrfs_fs_info *fs_info = root->fs_info;
2748 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2749 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2750 struct btrfs_trans_handle *trans = NULL;
2751 struct btrfs_block_rsv *rsv;
2752 unsigned int rsv_count;
2754 u64 len = end - start;
2760 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2765 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2769 * 1 - update the inode
2770 * 1 - removing the extents in the range
2771 * 1 - adding the hole extent if no_holes isn't set or if we are
2772 * replacing the range with a new extent
2774 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2779 trans = btrfs_start_transaction(root, rsv_count);
2780 if (IS_ERR(trans)) {
2781 ret = PTR_ERR(trans);
2786 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2789 trans->block_rsv = rsv;
2792 drop_args.path = path;
2793 drop_args.end = end + 1;
2794 drop_args.drop_cache = true;
2795 while (cur_offset < end) {
2796 drop_args.start = cur_offset;
2797 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2798 /* If we are punching a hole decrement the inode's byte count */
2800 btrfs_update_inode_bytes(inode, 0,
2801 drop_args.bytes_found);
2802 if (ret != -ENOSPC) {
2804 * The only time we don't want to abort is if we are
2805 * attempting to clone a partial inline extent, in which
2806 * case we'll get EOPNOTSUPP. However if we aren't doing a
2807 * clone we need to abort no matter what, because if we
2808 * got EOPNOTSUPP via prealloc then we messed up and
2812 (ret != -EOPNOTSUPP ||
2813 (extent_info && extent_info->is_new_extent)))
2814 btrfs_abort_transaction(trans, ret);
2818 trans->block_rsv = &fs_info->trans_block_rsv;
2820 if (!extent_info && cur_offset < drop_args.drop_end &&
2821 cur_offset < ino_size) {
2822 ret = fill_holes(trans, inode, path, cur_offset,
2823 drop_args.drop_end);
2826 * If we failed then we didn't insert our hole
2827 * entries for the area we dropped, so now the
2828 * fs is corrupted, so we must abort the
2831 btrfs_abort_transaction(trans, ret);
2834 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2836 * We are past the i_size here, but since we didn't
2837 * insert holes we need to clear the mapped area so we
2838 * know to not set disk_i_size in this area until a new
2839 * file extent is inserted here.
2841 ret = btrfs_inode_clear_file_extent_range(inode,
2843 drop_args.drop_end - cur_offset);
2846 * We couldn't clear our area, so we could
2847 * presumably adjust up and corrupt the fs, so
2850 btrfs_abort_transaction(trans, ret);
2856 drop_args.drop_end > extent_info->file_offset) {
2857 u64 replace_len = drop_args.drop_end -
2858 extent_info->file_offset;
2860 ret = btrfs_insert_replace_extent(trans, inode, path,
2861 extent_info, replace_len,
2862 drop_args.bytes_found);
2864 btrfs_abort_transaction(trans, ret);
2867 extent_info->data_len -= replace_len;
2868 extent_info->data_offset += replace_len;
2869 extent_info->file_offset += replace_len;
2872 ret = btrfs_update_inode(trans, root, inode);
2876 btrfs_end_transaction(trans);
2877 btrfs_btree_balance_dirty(fs_info);
2879 trans = btrfs_start_transaction(root, rsv_count);
2880 if (IS_ERR(trans)) {
2881 ret = PTR_ERR(trans);
2886 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2887 rsv, min_size, false);
2888 BUG_ON(ret); /* shouldn't happen */
2889 trans->block_rsv = rsv;
2891 cur_offset = drop_args.drop_end;
2892 len = end - cur_offset;
2893 if (!extent_info && len) {
2894 ret = find_first_non_hole(inode, &cur_offset, &len);
2895 if (unlikely(ret < 0))
2905 * If we were cloning, force the next fsync to be a full one since
2906 * we replaced (or just dropped in the case of cloning holes when
2907 * NO_HOLES is enabled) file extent items and did not set up new extent
2908 * maps for the replacement extents (or holes).
2910 if (extent_info && !extent_info->is_new_extent)
2911 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2916 trans->block_rsv = &fs_info->trans_block_rsv;
2918 * If we are using the NO_HOLES feature we might already have had a
2919 * hole that overlaps a part of the region [lockstart, lockend] and
2920 * ends at (or beyond) lockend. Since we have no file extent items to
2921 * represent holes, drop_end can be less than lockend and so we must
2922 * make sure we have an extent map representing the existing hole (the
2923 * call to __btrfs_drop_extents() might have dropped the existing extent
2924 * map representing the existing hole), otherwise the fast fsync path
2925 * will not record the existence of the hole region
2926 * [existing_hole_start, lockend].
2928 if (drop_args.drop_end <= end)
2929 drop_args.drop_end = end + 1;
2931 * Don't insert file hole extent item if it's for a range beyond eof
2932 * (because it's useless) or if it represents a 0 bytes range (when
2933 * cur_offset == drop_end).
2935 if (!extent_info && cur_offset < ino_size &&
2936 cur_offset < drop_args.drop_end) {
2937 ret = fill_holes(trans, inode, path, cur_offset,
2938 drop_args.drop_end);
2940 /* Same comment as above. */
2941 btrfs_abort_transaction(trans, ret);
2944 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2945 /* See the comment in the loop above for the reasoning here. */
2946 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2947 drop_args.drop_end - cur_offset);
2949 btrfs_abort_transaction(trans, ret);
2955 ret = btrfs_insert_replace_extent(trans, inode, path,
2956 extent_info, extent_info->data_len,
2957 drop_args.bytes_found);
2959 btrfs_abort_transaction(trans, ret);
2968 trans->block_rsv = &fs_info->trans_block_rsv;
2970 btrfs_end_transaction(trans);
2974 btrfs_free_block_rsv(fs_info, rsv);
2979 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2981 struct inode *inode = file_inode(file);
2982 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2983 struct btrfs_root *root = BTRFS_I(inode)->root;
2984 struct extent_state *cached_state = NULL;
2985 struct btrfs_path *path;
2986 struct btrfs_trans_handle *trans = NULL;
2991 u64 orig_start = offset;
2995 bool truncated_block = false;
2996 bool updated_inode = false;
2998 ret = btrfs_wait_ordered_range(inode, offset, len);
3002 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3003 ino_size = round_up(inode->i_size, fs_info->sectorsize);
3004 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
3006 goto out_only_mutex;
3008 /* Already in a large hole */
3010 goto out_only_mutex;
3013 ret = file_modified(file);
3015 goto out_only_mutex;
3017 lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
3018 lockend = round_down(offset + len,
3019 btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
3020 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
3021 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
3023 * We needn't truncate any block which is beyond the end of the file
3024 * because we are sure there is no data there.
3027 * Only do this if we are in the same block and we aren't doing the
3030 if (same_block && len < fs_info->sectorsize) {
3031 if (offset < ino_size) {
3032 truncated_block = true;
3033 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3038 goto out_only_mutex;
3041 /* zero back part of the first block */
3042 if (offset < ino_size) {
3043 truncated_block = true;
3044 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3046 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3051 /* Check the aligned pages after the first unaligned page.
3052 * If offset != orig_start, the first unaligned page and
3053 * several following pages are already holes, so the extra
3054 * check can be skipped. */
3055 if (offset == orig_start) {
3056 /* after truncate page, check hole again */
3057 len = offset + len - lockstart;
3059 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
3061 goto out_only_mutex;
3064 goto out_only_mutex;
3069 /* Check the tail unaligned part is in a hole */
3070 tail_start = lockend + 1;
3071 tail_len = offset + len - tail_start;
3073 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
3074 if (unlikely(ret < 0))
3075 goto out_only_mutex;
3077 /* zero the front end of the last page */
3078 if (tail_start + tail_len < ino_size) {
3079 truncated_block = true;
3080 ret = btrfs_truncate_block(BTRFS_I(inode),
3081 tail_start + tail_len,
3084 goto out_only_mutex;
3089 if (lockend < lockstart) {
3091 goto out_only_mutex;
3094 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3097 goto out_only_mutex;
3099 path = btrfs_alloc_path();
3105 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
3106 lockend, NULL, &trans);
3107 btrfs_free_path(path);
3111 ASSERT(trans != NULL);
3112 inode_inc_iversion(inode);
3113 inode->i_mtime = inode->i_ctime = current_time(inode);
3114 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3115 updated_inode = true;
3116 btrfs_end_transaction(trans);
3117 btrfs_btree_balance_dirty(fs_info);
3119 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3122 if (!updated_inode && truncated_block && !ret) {
3124 * If we only end up zeroing part of a page, we still need to
3125 * update the inode item, so that all the time fields are
3126 * updated as well as the necessary btrfs inode in memory fields
3127 * for detecting, at fsync time, if the inode isn't yet in the
3128 * log tree or it's there but not up to date.
3130 struct timespec64 now = current_time(inode);
3132 inode_inc_iversion(inode);
3133 inode->i_mtime = now;
3134 inode->i_ctime = now;
3135 trans = btrfs_start_transaction(root, 1);
3136 if (IS_ERR(trans)) {
3137 ret = PTR_ERR(trans);
3141 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3142 ret2 = btrfs_end_transaction(trans);
3147 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
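/*
 * Illustrative userspace sketch (not part of this file): hole punching as
 * handled by btrfs_punch_hole() above is requested with fallocate(2) and
 * FALLOC_FL_PUNCH_HOLE, which must be combined with FALLOC_FL_KEEP_SIZE.
 * Path and offsets are assumptions for the example.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int example_punch_hole(void)
{
	int fd = open("/mnt/btrfs/data.bin", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	/* Drop the blocks backing [64K, 64K + 128K) without changing i_size. */
	ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			64 * 1024, 128 * 1024);
	close(fd);
	return ret;
}
#endif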
3151 /* Helper structure to record which range is already reserved */
3152 struct falloc_range {
3153 struct list_head list;
3159 * Helper function to add falloc range
3161 * Caller should have locked the larger range of extent containing
3164 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3166 struct falloc_range *range = NULL;
3168 if (!list_empty(head)) {
3170 * As fallocate iterates by bytenr order, we only need to check
3173 range = list_last_entry(head, struct falloc_range, list);
3174 if (range->start + range->len == start) {
3180 range = kmalloc(sizeof(*range), GFP_KERNEL);
3183 range->start = start;
3185 list_add_tail(&range->list, head);
3189 static int btrfs_fallocate_update_isize(struct inode *inode,
3193 struct btrfs_trans_handle *trans;
3194 struct btrfs_root *root = BTRFS_I(inode)->root;
3198 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3201 trans = btrfs_start_transaction(root, 1);
3203 return PTR_ERR(trans);
3205 inode->i_ctime = current_time(inode);
3206 i_size_write(inode, end);
3207 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
3208 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3209 ret2 = btrfs_end_transaction(trans);
3211 return ret ? ret : ret2;
3215 RANGE_BOUNDARY_WRITTEN_EXTENT,
3216 RANGE_BOUNDARY_PREALLOC_EXTENT,
3217 RANGE_BOUNDARY_HOLE,
3220 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3223 const u64 sectorsize = btrfs_inode_sectorsize(inode);
3224 struct extent_map *em;
3227 offset = round_down(offset, sectorsize);
3228 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3232 if (em->block_start == EXTENT_MAP_HOLE)
3233 ret = RANGE_BOUNDARY_HOLE;
3234 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3235 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3237 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3239 free_extent_map(em);
3243 static int btrfs_zero_range(struct inode *inode,
3248 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3249 struct extent_map *em;
3250 struct extent_changeset *data_reserved = NULL;
3253 const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
3254 u64 alloc_start = round_down(offset, sectorsize);
3255 u64 alloc_end = round_up(offset + len, sectorsize);
3256 u64 bytes_to_reserve = 0;
3257 bool space_reserved = false;
3259 inode_dio_wait(inode);
3261 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3262 alloc_end - alloc_start);
3269 * Avoid hole punching and extent allocation for some cases. More cases
3270 * could be considered, but those are unlikely to be common and we keep things
3271 * as simple as possible for now. Also, intentionally, if the target
3272 * range contains one or more prealloc extents together with regular
3273 * extents and holes, we drop all the existing extents and allocate a
3274 * new prealloc extent, so that we get a larger contiguous disk extent.
3276 if (em->start <= alloc_start &&
3277 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3278 const u64 em_end = em->start + em->len;
3280 if (em_end >= offset + len) {
3282 * The whole range is already a prealloc extent,
3283 * do nothing except updating the inode's i_size if
3286 free_extent_map(em);
3287 ret = btrfs_fallocate_update_isize(inode, offset + len,
3292 * Part of the range is already a prealloc extent, so operate
3293 * only on the remaining part of the range.
3295 alloc_start = em_end;
3296 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3297 len = offset + len - alloc_start;
3298 offset = alloc_start;
3299 alloc_hint = em->block_start + em->len;
3301 free_extent_map(em);
3303 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3304 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3305 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3312 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3313 free_extent_map(em);
3314 ret = btrfs_fallocate_update_isize(inode, offset + len,
3318 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3319 free_extent_map(em);
3320 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3323 ret = btrfs_fallocate_update_isize(inode,
3328 free_extent_map(em);
3329 alloc_start = round_down(offset, sectorsize);
3330 alloc_end = alloc_start + sectorsize;
3334 alloc_start = round_up(offset, sectorsize);
3335 alloc_end = round_down(offset + len, sectorsize);
3338 * For unaligned ranges, check the pages at the boundaries, they might
3339 * map to an extent, in which case we need to partially zero them, or
3340 * they might map to a hole, in which case we need our allocation range
3343 if (!IS_ALIGNED(offset, sectorsize)) {
3344 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3348 if (ret == RANGE_BOUNDARY_HOLE) {
3349 alloc_start = round_down(offset, sectorsize);
3351 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3352 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3360 if (!IS_ALIGNED(offset + len, sectorsize)) {
3361 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3365 if (ret == RANGE_BOUNDARY_HOLE) {
3366 alloc_end = round_up(offset + len, sectorsize);
3368 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3369 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3379 if (alloc_start < alloc_end) {
3380 struct extent_state *cached_state = NULL;
3381 const u64 lockstart = alloc_start;
3382 const u64 lockend = alloc_end - 1;
3384 bytes_to_reserve = alloc_end - alloc_start;
3385 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3389 space_reserved = true;
3390 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3394 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3395 alloc_start, bytes_to_reserve);
3397 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3398 lockend, &cached_state);
3401 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3402 alloc_end - alloc_start,
3404 offset + len, &alloc_hint);
3405 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3406 lockend, &cached_state);
3407 /* btrfs_prealloc_file_range releases reserved space on error */
3409 space_reserved = false;
3413 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3415 if (ret && space_reserved)
3416 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3417 alloc_start, bytes_to_reserve);
3418 extent_changeset_free(data_reserved);
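/*
 * Illustrative userspace sketch (not part of this file): the zero range
 * path above is requested with fallocate(2) and FALLOC_FL_ZERO_RANGE; the
 * affected range ends up as holes or unwritten (prealloc) extents and reads
 * back as zeroes. Path and offsets are assumptions for the example.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int example_zero_range(void)
{
	int fd = open("/mnt/btrfs/data.bin", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	/* Zero bytes [4K, 4K + 16K) while keeping the file size unchanged. */
	ret = fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			4096, 16 * 1024);
	close(fd);
	return ret;
}
#endif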
3423 static long btrfs_fallocate(struct file *file, int mode,
3424 loff_t offset, loff_t len)
3426 struct inode *inode = file_inode(file);
3427 struct extent_state *cached_state = NULL;
3428 struct extent_changeset *data_reserved = NULL;
3429 struct falloc_range *range;
3430 struct falloc_range *tmp;
3431 struct list_head reserve_list;
3439 struct extent_map *em;
3440 int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
3443 /* Do not allow fallocate in ZONED mode */
3444 if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3447 alloc_start = round_down(offset, blocksize);
3448 alloc_end = round_up(offset + len, blocksize);
3449 cur_offset = alloc_start;
3451 /* Make sure we aren't being given some crap mode */
3452 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3453 FALLOC_FL_ZERO_RANGE))
3456 if (mode & FALLOC_FL_PUNCH_HOLE)
3457 return btrfs_punch_hole(file, offset, len);
3460 * Only trigger disk allocation, don't trigger qgroup reserve
3462 * For qgroup space, it will be checked later.
3464 if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3465 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3466 alloc_end - alloc_start);
3471 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3473 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3474 ret = inode_newsize_ok(inode, offset + len);
3479 ret = file_modified(file);
3484 * TODO: Move these two operations after we have checked
3485 * accurate reserved space, or fallocate can still fail but
3486 * with the page truncated or the size expanded.
3488 * But that's a minor problem and won't do much harm BTW.
3490 if (alloc_start > inode->i_size) {
3491 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3495 } else if (offset + len > inode->i_size) {
3497 * If we are fallocating from the end of the file onward we
3498 * need to zero out the end of the block if i_size lands in the
3499 * middle of a block.
3501 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3507 * wait for ordered IO before we have any locks. We'll loop again
3508 * below with the locks held.
3510 ret = btrfs_wait_ordered_range(inode, alloc_start,
3511 alloc_end - alloc_start);
3515 if (mode & FALLOC_FL_ZERO_RANGE) {
3516 ret = btrfs_zero_range(inode, offset, len, mode);
3517 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3521 locked_end = alloc_end - 1;
3523 struct btrfs_ordered_extent *ordered;
3525 /* the extent lock is ordered inside the running
3528 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3529 locked_end, &cached_state);
3530 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
3534 ordered->file_offset + ordered->num_bytes > alloc_start &&
3535 ordered->file_offset < alloc_end) {
3536 btrfs_put_ordered_extent(ordered);
3537 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3538 alloc_start, locked_end,
3541 * we can't wait on the range with the transaction
3542 * running or with the extent lock held
3544 ret = btrfs_wait_ordered_range(inode, alloc_start,
3545 alloc_end - alloc_start);
3550 btrfs_put_ordered_extent(ordered);
3555 /* First, check if we exceed the qgroup limit */
3556 INIT_LIST_HEAD(&reserve_list);
3557 while (cur_offset < alloc_end) {
3558 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3559 alloc_end - cur_offset);
3564 last_byte = min(extent_map_end(em), alloc_end);
3565 actual_end = min_t(u64, extent_map_end(em), offset + len);
3566 last_byte = ALIGN(last_byte, blocksize);
3567 if (em->block_start == EXTENT_MAP_HOLE ||
3568 (cur_offset >= inode->i_size &&
3569 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3570 ret = add_falloc_range(&reserve_list, cur_offset,
3571 last_byte - cur_offset);
3573 free_extent_map(em);
3576 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3577 &data_reserved, cur_offset,
3578 last_byte - cur_offset);
3580 cur_offset = last_byte;
3581 free_extent_map(em);
3586 * We do not need to reserve an unwritten extent for this
3587 * range; free the reserved data space first, otherwise
3588 * it'll result in a false ENOSPC error.
3590 btrfs_free_reserved_data_space(BTRFS_I(inode),
3591 data_reserved, cur_offset,
3592 last_byte - cur_offset);
3594 free_extent_map(em);
3595 cur_offset = last_byte;
3599 * If ret is still 0, it means we're OK to fallocate.
3600 * Otherwise just clean up the list and exit.
3602 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3604 ret = btrfs_prealloc_file_range(inode, mode,
3606 range->len, i_blocksize(inode),
3607 offset + len, &alloc_hint);
3609 btrfs_free_reserved_data_space(BTRFS_I(inode),
3610 data_reserved, range->start,
3612 list_del(&range->list);
3619 * We didn't need to allocate any more space, but we still extended the
3620 * size of the file so we need to update i_size and the inode item.
3622 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3624 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3627 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3628 /* Let go of our reservation. */
3629 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3630 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3631 cur_offset, alloc_end - cur_offset);
3632 extent_changeset_free(data_reserved);
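/*
 * Illustrative userspace sketch (not part of this file): plain preallocation
 * through btrfs_fallocate() above, i.e. fallocate(2) with mode 0, reserves
 * unwritten extents and may extend i_size. Path and length are assumptions
 * for the example.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int example_prealloc(void)
{
	int fd = open("/mnt/btrfs/data.bin", O_RDWR | O_CREAT, 0644);
	int ret;

	if (fd < 0)
		return -1;
	ret = fallocate(fd, 0, 0, 1024 * 1024);	/* reserve 1 MiB */
	close(fd);
	return ret;
}
#endif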
3636 static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
3639 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3640 struct extent_map *em = NULL;
3641 struct extent_state *cached_state = NULL;
3642 loff_t i_size = inode->vfs_inode.i_size;
3649 if (i_size == 0 || offset >= i_size)
3653 * offset can be negative, in this case we start finding DATA/HOLE from
3654 * the very start of the file.
3656 start = max_t(loff_t, 0, offset);
3658 lockstart = round_down(start, fs_info->sectorsize);
3659 lockend = round_up(i_size, fs_info->sectorsize);
3660 if (lockend <= lockstart)
3661 lockend = lockstart + fs_info->sectorsize;
3663 len = lockend - lockstart + 1;
3665 lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
3667 while (start < i_size) {
3668 em = btrfs_get_extent_fiemap(inode, start, len);
3675 if (whence == SEEK_HOLE &&
3676 (em->block_start == EXTENT_MAP_HOLE ||
3677 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3679 else if (whence == SEEK_DATA &&
3680 (em->block_start != EXTENT_MAP_HOLE &&
3681 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3684 start = em->start + em->len;
3685 free_extent_map(em);
3689 free_extent_map(em);
3690 unlock_extent_cached(&inode->io_tree, lockstart, lockend,
3695 if (whence == SEEK_DATA && start >= i_size)
3698 offset = min_t(loff_t, start, i_size);
3704 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3706 struct inode *inode = file->f_mapping->host;
3710 return generic_file_llseek(file, offset, whence);
3713 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3714 offset = find_desired_extent(BTRFS_I(inode), offset, whence);
3715 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3722 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
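/*
 * Illustrative userspace sketch (not part of this file): SEEK_DATA/SEEK_HOLE
 * as implemented by find_desired_extent() above can be used to walk the data
 * regions of a sparse file. The path is an assumption for the example.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

void example_map_data_regions(void)
{
	int fd = open("/mnt/btrfs/sparse.bin", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return;
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
}
#endif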
3725 static int btrfs_file_open(struct inode *inode, struct file *filp)
3729 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
3731 ret = fsverity_file_open(inode, filp);
3734 return generic_file_open(inode, filp);
3737 static int check_direct_read(struct btrfs_fs_info *fs_info,
3738 const struct iov_iter *iter, loff_t offset)
3743 ret = check_direct_IO(fs_info, iter, offset);
3747 if (!iter_is_iovec(iter))
3750 for (seg = 0; seg < iter->nr_segs; seg++)
3751 for (i = seg + 1; i < iter->nr_segs; i++)
3752 if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
3757 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3759 struct inode *inode = file_inode(iocb->ki_filp);
3760 size_t prev_left = 0;
3764 if (fsverity_active(inode))
3767 if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3770 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3773 * This is similar to what we do for direct IO writes, see the comment
3774 * at btrfs_direct_write(), but we also disable page faults in addition
3775 * to disabling them only at the iov_iter level. This is because when
3776 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
3777 * which can still trigger page fault-ins despite having set ->nofault
3778 * to true on our 'to' iov_iter.
3780 * The difference to direct IO writes is that we deadlock when trying
3781 * to lock the extent range in the inode's tree during the page reads
3782 * triggered by the fault in (while for writes it is due to waiting for
3783 * our own ordered extent). This is because for direct IO reads,
3784 * btrfs_dio_iomap_begin() returns with the extent range locked, which
3785 * is only unlocked in the endio callback (end_bio_extent_readpage()).
3787 pagefault_disable();
3789 ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
3790 IOMAP_DIO_PARTIAL, read);
3791 to->nofault = false;
3794 /* No increment (+=) because iomap returns a cumulative value. */
3798 if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
3799 const size_t left = iov_iter_count(to);
3801 if (left == prev_left) {
3803 * We didn't make any progress since the last attempt,
3804 * fall back to a buffered read for the remainder of the
3805 * range. This is just to avoid any possibility of looping
3811 * We made some progress since the last retry or this is
3812 * the first time we are retrying. Fault in as many pages
3813 * as possible and retry.
3815 fault_in_iov_iter_writeable(to, left);
3820 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3821 return ret < 0 ? ret : read;
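/*
 * Illustrative userspace sketch (not part of this file): an O_DIRECT read
 * serviced by btrfs_direct_read() above. Direct IO requires the buffer,
 * offset and length to be suitably aligned (4096 is an assumption here),
 * and a faulting or partial read falls back as described in the comments
 * above.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

ssize_t example_direct_read(const char *path)
{
	void *buf = NULL;
	ssize_t ret = -1;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096) == 0)
		ret = read(fd, buf, 4096);	/* enters the DIO read path above */
	free(buf);
	close(fd);
	return ret;
}
#endif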
3824 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3828 if (iocb->ki_flags & IOCB_DIRECT) {
3829 ret = btrfs_direct_read(iocb, to);
3830 if (ret < 0 || !iov_iter_count(to) ||
3831 iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3835 return filemap_read(iocb, to, ret);
3838 const struct file_operations btrfs_file_operations = {
3839 .llseek = btrfs_file_llseek,
3840 .read_iter = btrfs_file_read_iter,
3841 .splice_read = generic_file_splice_read,
3842 .write_iter = btrfs_file_write_iter,
3843 .splice_write = iter_file_splice_write,
3844 .mmap = btrfs_file_mmap,
3845 .open = btrfs_file_open,
3846 .release = btrfs_release_file,
3847 .fsync = btrfs_sync_file,
3848 .fallocate = btrfs_fallocate,
3849 .unlocked_ioctl = btrfs_ioctl,
3850 #ifdef CONFIG_COMPAT
3851 .compat_ioctl = btrfs_compat_ioctl,
3853 .remap_file_range = btrfs_remap_file_range,
3856 void __cold btrfs_auto_defrag_exit(void)
3858 kmem_cache_destroy(btrfs_inode_defrag_cachep);
3861 int __init btrfs_auto_defrag_init(void)
3863 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3864 sizeof(struct inode_defrag), 0,
3867 if (!btrfs_inode_defrag_cachep)
3873 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3878 * So with compression we will find and lock a dirty page and clear the
3879 * first one as dirty, set up an async extent, and immediately return
3880 * with the entire range locked but with nobody actually marked with
3881 * writeback. So we can't just filemap_write_and_wait_range() and
3882 * expect it to work since it will just kick off a thread to do the
3883 * actual work. So we need to call filemap_fdatawrite_range _again_
3884 * since it will wait on the page lock, which won't be unlocked until
3885 * after the pages have been marked as writeback and so we're good to go
3886 * from there. We have to do this otherwise we'll miss the ordered
3887 * extents and that results in badness. Please Josef, do not think you
3888 * know better and pull this out at some point in the future, it is
3889 * right and you are wrong.
3891 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3892 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3893 &BTRFS_I(inode)->runtime_flags))
3894 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);