// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
40 #include "transaction.h"
41 #include "btrfs_inode.h"
42 #include "print-tree.h"
43 #include "ordered-data.h"
47 #include "compression.h"
49 #include "free-space-cache.h"
52 #include "delalloc-space.h"
53 #include "block-group.h"
54 #include "space-info.h"
57 #include "inode-item.h"
59 #include "accessors.h"
60 #include "extent-tree.h"
61 #include "root-tree.h"
64 #include "file-item.h"
65 #include "uuid-tree.h"
69 #include "relocation.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 *done_offset,
				   bool keep_locked, bool no_inline);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * and continue.
		 */
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			      warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			      warn->logical, warn->mirror_num, root, inum, offset,
			      fs_info->sectorsize, nlink,
			      (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		      warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}

/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * If the backref lookup fails, fall back to the old, less detailed error
 * message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			      inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			      CSUM_FMT_VALUE(csum_size, csum),
			      CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
		      inode->root->root_key.objectid,
		      btrfs_ino(inode), file_off, logical,
		      CSUM_FMT_VALUE(csum_size, csum),
		      CSUM_FMT_VALUE(csum_size, csum_expected),
		      mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;
			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				      logical, mirror_num,
				      (ref_level ? "node" : "leaf"),
				      ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			      root->root_key.objectid, btrfs_ino(inode),
			      logical_start,
			      CSUM_FMT_VALUE(csum_size, csum),
			      CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			      root->root_key.objectid, btrfs_ino(inode),
			      logical_start,
			      CSUM_FMT_VALUE(csum_size, csum),
			      CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	}
}
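
/*
 * Example (illustrative only, hypothetical values): with the default crc32c
 * checksums the warning emitted above looks roughly like
 *
 *	BTRFS warning (device sda1): csum failed root 5 ino 257 off 4096
 *	csum 0x8941f998 expected csum 0x93c8cd71 mirror 1
 *
 * i.e. one line per failed sector, rate limited by btrfs_warn_rl().
 */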

/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first attempt,
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}
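
/*
 * Illustrative usage sketch (not an actual caller in this file): a path that
 * must not block could do
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;	(-EAGAIN, caller retries or falls back)
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *
 * The unlock must be passed the same BTRFS_ILOCK_SHARED/BTRFS_ILOCK_MMAP bits
 * so that the shared/exclusive mode and the i_mmap_lock state match.
 */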

/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}

/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it must not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, btrfs_mark_ordered_io_finished() will
		 * be called on it in run_delalloc_range() for the error
		 * handling, which will clear the page Ordered bit and run the
		 * ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range().
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
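
/*
 * Illustrative sketch of the intended use, loosely modeled on the error
 * handling in btrfs_run_delalloc_range() (details vary by kernel version):
 *
 *	ret = cow_file_range(inode, locked_page, start, end, ...);
 *	if (ret < 0)
 *		btrfs_cleanup_ordered_extents(inode, locked_page, start,
 *					      end - start + 1);
 *
 * Only the ordered extents already created for the range are torn down; the
 * reserved metadata is released when each ordered extent finishes.
 */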

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into
 * the btree. The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
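
/*
 * Sizing note (illustrative): the inline data lives in the file extent item
 * itself, so the leaf item size computed above is
 *
 *	datasize = btrfs_file_extent_calc_inline_size(cur_size);
 *
 * i.e. BTRFS_FILE_EXTENT_INLINE_DATA_START (the fixed header part of
 * struct btrfs_file_extent_item, 21 bytes) plus the (possibly compressed)
 * payload length.
 */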

/*
 * Conditionally insert an inline extent into the file. This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and within the configured maximum
	 * inline size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space; an inline extent won't be
	 * counted as a data extent, so free it directly here. At reserve time
	 * the space is always aligned to page size, so just free one page.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
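
/*
 * Worked example (illustrative, assumes the default mount options): with a
 * 4 KiB sectorsize and the default max_inline of 2048 bytes, a 3000-byte
 * file that compresses down to 1500 bytes can be inlined (size fits in one
 * sector and data_len <= max_inline), while the same file stored
 * uncompressed (data_len == 3000) is rejected by the max_inline check and
 * gets a regular extent instead.
 */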

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
		     btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), causing the one finished later to find the page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page will be locked until the full compression
	 * finishes, delaying the write of other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges
	 * first to prevent any submitted async extent from unlocking the full
	 * page. By this, we can ensure for the subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}
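
/*
 * Illustrative decision order (a summary of the checks above, not extra
 * logic): with "mount -o compress-force=zstd" the FORCE_COMPRESS check
 * returns 1 before the heuristic ever runs; with plain "compress=zstd" a
 * range on an inode that previously compressed badly (BTRFS_INODE_NOCOMPRESS
 * set) is skipped, and everything else is decided by
 * btrfs_compress_heuristic().
 */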

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * Work queue callback to start compression on a range of a file.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();

	actual_end = min_t(u64, i_size, end + 1);
again:
	pages = NULL;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For the subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
				   mapping, start, pages, &nr_pages, &total_in,
				   &total_compressed);
	if (ret)
		goto mark_incompressible;
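
	/*
	 * Encoding note (illustrative): per the call above, the low nibble of
	 * the first argument carries the algorithm and bits 4 and up carry the
	 * level, so e.g. zlib (BTRFS_COMPRESS_ZLIB == 1) at level 9 is passed
	 * as 1 | (9 << 4) == 0x91. A level of 0 lets the compression code pick
	 * its default level.
	 */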

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		if (total_in < actual_end) {
			ret = cow_file_range_inline(inode, actual_end, 0,
						    BTRFS_COMPRESS_NONE, NULL,
						    false);
		} else {
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;

			if (ret < 0)
				mapping_set_error(mapping, -EIO);

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages;
		}
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free
	 * at least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	add_async_extent(async_chunk, start, total_in, total_compressed, pages,
			 nr_pages, compress_type);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
free_pages:
	if (pages) {
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
	}
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, NULL, true, false);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (ret == 1)
		return;

	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			btrfs_mark_ordered_io_finished(inode, locked_page,
						       page_start, PAGE_SIZE,
						       !ret);
			btrfs_page_clear_uptodate(inode->root->fs_info,
						  locked_page, page_start,
						  PAGE_SIZE);
			mapping_set_error(locked_page->mapping, ret);
			unlock_page(locked_page);
		}
		return;
	}

	/* All pages will be unlocked, including @locked_page */
	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	extent_write_locked_range(&inode->vfs_inode, NULL, start, end, &wbc,
				  false);
	wbc_detach_inode(&wbc);
}

static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * Here we used to try again by going back to the
		 * non-compressed path for ENOSPC. But if we can't reserve
		 * space even for the compressed size, it cannot work for the
		 * uncompressed size, which requires a larger allocation. So
		 * here we directly go to the error path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    root->root_key.objectid, btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code. The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_page.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_page and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_page is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page, u64 start, u64 end,
				   u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() to unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't determine if it's an inline extent or a
			 * compressed extent.
			 */
			unlock_page(locked_page);
			ret = 1;
			goto done;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation. Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset,	/* len */
				  start,		/* orig_start */
				  ins.objectid,		/* block_start */
				  ins.offset,		/* block_len */
				  ins.offset,		/* orig_block_len */
				  ram_size,		/* ram_bytes */
				  BTRFS_COMPRESS_NONE,	/* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error: since start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
		cond_resched();
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */
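
	/*
	 * Illustrative example (hypothetical numbers): for a 1 MiB delalloc
	 * range starting at file offset 0, suppose the first 512 KiB was
	 * reserved and its ordered extents created, and the next reservation
	 * failed. Then orig_start == 0 and start == 512K, so region (1) is
	 * [0, 512K) and is cleaned up by btrfs_cleanup_ordered_extents() in
	 * the caller, while regions (2) and (3) are handled right below.
	 */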
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g,
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	clear_bits |= EXTENT_CLEAR_DATA_RESV;
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits, page_ops);
	return ret;
}

/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct page *locked_page, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and its
		 * the original page we were actually given. As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}
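
/*
 * Chunking note (illustrative): each async_chunk covers at most 512 KiB, so
 * e.g. an exactly 1 MiB delalloc range is split into
 * DIV_ROUND_UP(end - start, SZ_512K) == 2 chunks, each compressed and
 * submitted independently on the delalloc workqueue.
 */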

static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, struct writeback_control *wbc)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
				     true, false);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
					  done_offset, wbc, true);
		start = done_offset + 1;
	}

	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;
	int ret;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we thought we could do NOCOW for the
	 * respective file range (either there is a prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However, when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
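	/*
	 * Worked example (hypothetical numbers): a 128 KiB buffered write went
	 * the NOCOW path with no data space reserved, so its range carries
	 * EXTENT_NORESERVE. If the block group later goes read-only and we end
	 * up here, bytes_may_use is bumped by those 128 KiB before COWing, so
	 * both the accounting in btrfs_add_reserved_bytes() on success and the
	 * release on the error path balance out.
	 */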
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	/*
	 * Don't try to create inline extents, as a mix of inline extent that
	 * is written out and unlocked directly and a normal NOCOW extent
	 * doesn't work.
	 */
	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
	ASSERT(ret != 1);
	return ret;
}

struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};

/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;
	bool nowait = path->nowait;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, args->strict, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path. So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range. This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
				  nowait);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}
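
/*
 * Illustrative caller sketch (simplified from run_delalloc_nocow() below):
 *
 *	struct can_nocow_file_extent_args nocow_args = { 0 };
 *
 *	nocow_args.start = cur_offset;
 *	nocow_args.end = end;
 *	nocow_args.writeback_path = true;
 *	ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
 *
 * A return of 1 means nocow_args.num_bytes at nocow_args.disk_bytenr can be
 * written in place; a return of 0 means the range must go through the COW
 * path instead.
 */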

/*
 * NOCOW writeback callback. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct btrfs_block_group *bg;
	bool nocow = false;
	struct can_nocow_file_extent_args nocow_args = { 0 };

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;
2008 struct btrfs_ordered_extent *ordered;
2009 struct btrfs_key found_key;
2010 struct btrfs_file_extent_item *fi;
2011 struct extent_buffer *leaf;
2020 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2026 * If there is no extent for our range when doing the initial
2027 * search, then go back to the previous slot as it will be the
2028 * one containing the search offset
2030 if (ret > 0 && path->slots[0] > 0 && check_prev) {
2031 leaf = path->nodes[0];
2032 btrfs_item_key_to_cpu(leaf, &found_key,
2033 path->slots[0] - 1);
2034 if (found_key.objectid == ino &&
2035 found_key.type == BTRFS_EXTENT_DATA_KEY)
2040 /* Go to next leaf if we have exhausted the current one */
2041 leaf = path->nodes[0];
2042 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2043 ret = btrfs_next_leaf(root, path);
2045 if (cow_start != (u64)-1)
2046 cur_offset = cow_start;
2051 leaf = path->nodes[0];
2054 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2056 /* Didn't find anything for our INO */
2057 if (found_key.objectid > ino)
2060 * Keep searching until we find an EXTENT_ITEM or there are no
2061 * more extents for this inode
2063 if (WARN_ON_ONCE(found_key.objectid < ino) ||
2064 found_key.type < BTRFS_EXTENT_DATA_KEY) {
2069 /* Found key is not EXTENT_DATA_KEY or starts after req range */
2070 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2071 found_key.offset > end)
2075 * If the found extent starts after requested offset, then
2076 * adjust extent_end to be right before this extent begins
2078 if (found_key.offset > cur_offset) {
2079 extent_end = found_key.offset;
2085 * Found extent which begins before our range and potentially intersects it.
2088 fi = btrfs_item_ptr(leaf, path->slots[0],
2089 struct btrfs_file_extent_item);
2090 extent_type = btrfs_file_extent_type(leaf, fi);
2091 /* If this is triggered then we have a memory corruption. */
2092 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2093 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2097 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2098 extent_end = btrfs_file_extent_end(path);
2101 * If the extent we got ends before our current offset, skip to the next extent.
2104 if (extent_end <= cur_offset) {
2109 nocow_args.start = cur_offset;
2110 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2112 if (cow_start != (u64)-1)
2113 cur_offset = cow_start;
2115 } else if (ret == 0) {
2120 bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
2125 * If nocow is false then record the beginning of the range
2126 * that needs to be COWed
2129 if (cow_start == (u64)-1)
2130 cow_start = cur_offset;
2131 cur_offset = extent_end;
2132 if (cur_offset > end)
2134 if (!path->nodes[0])
2141 * COW the range from cow_start to found_key.offset - 1. The key
2142 * contains the beginning of the first extent that can be NOCOW,
2143 * which follows a range that needs to be COWed.
2145 if (cow_start != (u64)-1) {
2146 ret = fallback_to_cow(inode, locked_page,
2147 cow_start, found_key.offset - 1);
2150 cow_start = (u64)-1;
2153 nocow_end = cur_offset + nocow_args.num_bytes - 1;
2154 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
2156 u64 orig_start = found_key.offset - nocow_args.extent_offset;
2157 struct extent_map *em;
2159 em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
2161 nocow_args.disk_bytenr, /* block_start */
2162 nocow_args.num_bytes, /* block_len */
2163 nocow_args.disk_num_bytes, /* orig_block_len */
2164 ram_bytes, BTRFS_COMPRESS_NONE,
2165 BTRFS_ORDERED_PREALLOC);
2170 free_extent_map(em);
2173 ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
2174 nocow_args.num_bytes, nocow_args.num_bytes,
2175 nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
2177 ? (1 << BTRFS_ORDERED_PREALLOC)
2178 : (1 << BTRFS_ORDERED_NOCOW),
2179 BTRFS_COMPRESS_NONE);
2180 if (IS_ERR(ordered)) {
2182 btrfs_drop_extent_map_range(inode, cur_offset,
2185 ret = PTR_ERR(ordered);
2190 btrfs_dec_nocow_writers(bg);
2194 if (btrfs_is_data_reloc_root(root))
2196 * Error handled later, as we must prevent
2197 * extent_clear_unlock_delalloc() in error handler
2198 * from freeing metadata of created ordered extent.
2200 ret = btrfs_reloc_clone_csums(ordered);
2201 btrfs_put_ordered_extent(ordered);
2203 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2204 locked_page, EXTENT_LOCKED |
2206 EXTENT_CLEAR_DATA_RESV,
2207 PAGE_UNLOCK | PAGE_SET_ORDERED);
2209 cur_offset = extent_end;
2212 * btrfs_reloc_clone_csums() error, now we're OK to call error
2213 * handler, as metadata for created ordered extent will only
2214 * be freed by btrfs_finish_ordered_io().
2218 if (cur_offset > end)
2221 btrfs_release_path(path);
2223 if (cur_offset <= end && cow_start == (u64)-1)
2224 cow_start = cur_offset;
2226 if (cow_start != (u64)-1) {
2228 ret = fallback_to_cow(inode, locked_page, cow_start, end);
2235 btrfs_dec_nocow_writers(bg);
2237 if (ret && cur_offset < end)
2238 extent_clear_unlock_delalloc(inode, cur_offset, end,
2239 locked_page, EXTENT_LOCKED |
2240 EXTENT_DELALLOC | EXTENT_DEFRAG |
2241 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2242 PAGE_START_WRITEBACK |
2243 PAGE_END_WRITEBACK);
2244 btrfs_free_path(path);
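/*
 * NOCOW is only worth attempting for inodes flagged NODATACOW or
 * PREALLOC, and even then not for ranges marked for defrag, which
 * must be COWed.
 */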
2248 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2250 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2251 if (inode->defrag_bytes &&
2252 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
2261 * Function to process delayed allocation (create CoW) for ranges which are
2262 * being touched for the first time.
2264 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2265 u64 start, u64 end, struct writeback_control *wbc)
2267 const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2271 * The range must cover part of the @locked_page, or a return of 1
2272 * can confuse the caller.
2274 ASSERT(!(end <= page_offset(locked_page) ||
2275 start >= page_offset(locked_page) + PAGE_SIZE));
2277 if (should_nocow(inode, start, end)) {
2279 * Normally on a zoned device we're only doing COW writes, but
2280 * in case of relocation on a zoned filesystem we have taken
2281 * precautions to ensure we're only writing sequentially. It's safe
2282 * to use run_delalloc_nocow() here, like for regular
2283 * preallocated inodes.
2285 ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
2286 ret = run_delalloc_nocow(inode, locked_page, start, end);
2290 if (btrfs_inode_can_compress(inode) &&
2291 inode_need_compress(inode, start, end) &&
2292 run_delalloc_compressed(inode, locked_page, start, end, wbc))
2296 ret = run_delalloc_zoned(inode, locked_page, start, end, wbc);
2298 ret = cow_file_range(inode, locked_page, start, end, NULL,
2303 btrfs_cleanup_ordered_extents(inode, locked_page, start,
2308 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2309 struct extent_state *orig, u64 split)
2311 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2314 /* not delalloc, ignore it */
2315 if (!(orig->state & EXTENT_DELALLOC))
2318 size = orig->end - orig->start + 1;
2319 if (size > fs_info->max_extent_size) {
2324 * See the explanation in btrfs_merge_delalloc_extent, the same
2325 * applies here, just in reverse.
2327 new_size = orig->end - split + 1;
2328 num_extents = count_max_extents(fs_info, new_size);
2329 new_size = split - orig->start;
2330 num_extents += count_max_extents(fs_info, new_size);
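	/*
	 * Worked example, assuming a 128M max_extent_size: splitting a
	 * 128M+8K extent (2 outstanding extents) 4K before its end gives
	 * a 128M+4K piece (2 extents) plus a 4K piece (1 extent), 3 in
	 * total, so one extra outstanding extent is added below.
	 */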
2331 if (count_max_extents(fs_info, size) >= num_extents)
2335 spin_lock(&inode->lock);
2336 btrfs_mod_outstanding_extents(inode, 1);
2337 spin_unlock(&inode->lock);
2341 * Handle merged delayed allocation extents so we can keep track of new extents
2342 * that are just merged onto old extents, such as when we are doing sequential
2343 * writes, so we can properly account for the metadata space we'll need.
2345 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2346 struct extent_state *other)
2348 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2349 u64 new_size, old_size;
2352 /* not delalloc, ignore it */
2353 if (!(other->state & EXTENT_DELALLOC))
2356 if (new->start > other->start)
2357 new_size = new->end - other->start + 1;
2359 new_size = other->end - new->start + 1;
2361 /* we're not bigger than the max, unreserve the space and go */
2362 if (new_size <= fs_info->max_extent_size) {
2363 spin_lock(&inode->lock);
2364 btrfs_mod_outstanding_extents(inode, -1);
2365 spin_unlock(&inode->lock);
2370 * We have to add up either side to figure out how many extents were
2371 * accounted for before we merged into one big extent. If the number of
2372 * extents we accounted for is <= the amount we need for the new range
2373 * then we can return, otherwise drop. Think of it like this
2377 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2378 * need 2 outstanding extents, on one side we have 1 and the other side
2379 * we have 1 so they are == and we can return. But in this case
2381 * [MAX_SIZE+4k][MAX_SIZE+4k]
2383 * Each range on their own accounts for 2 extents, but merged together
2384 * they are only 3 extents worth of accounting, so we need to drop in this case.
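 * Concretely, with a 128M max extent size each MAX_SIZE+4k range
 * accounts for 2 extents (4 in total), while the merged 2*MAX_SIZE+8k
 * range only needs 3, so exactly one outstanding extent is dropped.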
2387 old_size = other->end - other->start + 1;
2388 num_extents = count_max_extents(fs_info, old_size);
2389 old_size = new->end - new->start + 1;
2390 num_extents += count_max_extents(fs_info, old_size);
2391 if (count_max_extents(fs_info, new_size) >= num_extents)
2394 spin_lock(&inode->lock);
2395 btrfs_mod_outstanding_extents(inode, -1);
2396 spin_unlock(&inode->lock);
2399 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2400 struct btrfs_inode *inode)
2402 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2404 spin_lock(&root->delalloc_lock);
2405 if (list_empty(&inode->delalloc_inodes)) {
2406 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2407 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags);
2408 root->nr_delalloc_inodes++;
2409 if (root->nr_delalloc_inodes == 1) {
2410 spin_lock(&fs_info->delalloc_root_lock);
2411 BUG_ON(!list_empty(&root->delalloc_root));
2412 list_add_tail(&root->delalloc_root,
2413 &fs_info->delalloc_roots);
2414 spin_unlock(&fs_info->delalloc_root_lock);
2417 spin_unlock(&root->delalloc_lock);
2420 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2421 struct btrfs_inode *inode)
2423 struct btrfs_fs_info *fs_info = root->fs_info;
2425 if (!list_empty(&inode->delalloc_inodes)) {
2426 list_del_init(&inode->delalloc_inodes);
2427 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2428 &inode->runtime_flags);
2429 root->nr_delalloc_inodes--;
2430 if (!root->nr_delalloc_inodes) {
2431 ASSERT(list_empty(&root->delalloc_inodes));
2432 spin_lock(&fs_info->delalloc_root_lock);
2433 BUG_ON(list_empty(&root->delalloc_root));
2434 list_del_init(&root->delalloc_root);
2435 spin_unlock(&fs_info->delalloc_root_lock);
2440 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2441 struct btrfs_inode *inode)
2443 spin_lock(&root->delalloc_lock);
2444 __btrfs_del_delalloc_inode(root, inode);
2445 spin_unlock(&root->delalloc_lock);
2449 * Properly track delayed allocation bytes in the inode and to maintain the
2450 * list of inodes that have pending delalloc work to be done.
2452 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2455 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2457 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2460 * set_bit and clear_bit hooks normally require _irqsave/restore
2461 * but in this case, we are only testing for the DELALLOC
2462 * bit, which is only set or cleared with irqs on
2464 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2465 struct btrfs_root *root = inode->root;
2466 u64 len = state->end + 1 - state->start;
2467 u32 num_extents = count_max_extents(fs_info, len);
2468 bool do_list = !btrfs_is_free_space_inode(inode);
2470 spin_lock(&inode->lock);
2471 btrfs_mod_outstanding_extents(inode, num_extents);
2472 spin_unlock(&inode->lock);
2474 /* For sanity tests */
2475 if (btrfs_is_testing(fs_info))
2478 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2479 fs_info->delalloc_batch);
2480 spin_lock(&inode->lock);
2481 inode->delalloc_bytes += len;
2482 if (bits & EXTENT_DEFRAG)
2483 inode->defrag_bytes += len;
2484 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2485 &inode->runtime_flags))
2486 btrfs_add_delalloc_inodes(root, inode);
2487 spin_unlock(&inode->lock);
2490 if (!(state->state & EXTENT_DELALLOC_NEW) &&
2491 (bits & EXTENT_DELALLOC_NEW)) {
2492 spin_lock(&inode->lock);
2493 inode->new_delalloc_bytes += state->end + 1 - state->start;
2494 spin_unlock(&inode->lock);
2499 * Once a range is no longer delalloc this function ensures that proper
2500 * accounting happens.
2502 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2503 struct extent_state *state, u32 bits)
2505 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2506 u64 len = state->end + 1 - state->start;
2507 u32 num_extents = count_max_extents(fs_info, len);
2509 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2510 spin_lock(&inode->lock);
2511 inode->defrag_bytes -= len;
2512 spin_unlock(&inode->lock);
2516 * set_bit and clear_bit hooks normally require _irqsave/restore
2517 * but in this case, we are only testing for the DELALLOC
2518 * bit, which is only set or cleared with irqs on
2520 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2521 struct btrfs_root *root = inode->root;
2522 bool do_list = !btrfs_is_free_space_inode(inode);
2524 spin_lock(&inode->lock);
2525 btrfs_mod_outstanding_extents(inode, -num_extents);
2526 spin_unlock(&inode->lock);
2529 * We don't reserve metadata space for space cache inodes so we
2530 * don't need to call delalloc_release_metadata if there is an error.
2533 if (bits & EXTENT_CLEAR_META_RESV &&
2534 root != fs_info->tree_root)
2535 btrfs_delalloc_release_metadata(inode, len, false);
2537 /* For sanity tests. */
2538 if (btrfs_is_testing(fs_info))
2541 if (!btrfs_is_data_reloc_root(root) &&
2542 do_list && !(state->state & EXTENT_NORESERVE) &&
2543 (bits & EXTENT_CLEAR_DATA_RESV))
2544 btrfs_free_reserved_data_space_noquota(fs_info, len);
2546 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2547 fs_info->delalloc_batch);
2548 spin_lock(&inode->lock);
2549 inode->delalloc_bytes -= len;
2550 if (do_list && inode->delalloc_bytes == 0 &&
2551 test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2552 &inode->runtime_flags))
2553 btrfs_del_delalloc_inode(root, inode);
2554 spin_unlock(&inode->lock);
2557 if ((state->state & EXTENT_DELALLOC_NEW) &&
2558 (bits & EXTENT_DELALLOC_NEW)) {
2559 spin_lock(&inode->lock);
2560 ASSERT(inode->new_delalloc_bytes >= len);
2561 inode->new_delalloc_bytes -= len;
2562 if (bits & EXTENT_ADD_INODE_BYTES)
2563 inode_add_bytes(&inode->vfs_inode, len);
2564 spin_unlock(&inode->lock);
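/*
 * Make sure @bbio covers a single ordered extent: the bio must start at
 * the beginning of @ordered, and if it is shorter than the ordered
 * extent, split off a matching ordered extent (and, except for NOCOW
 * writes into a pre-existing extent, the extent map as well).
 */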
2568 static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
2569 struct btrfs_ordered_extent *ordered)
2571 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
2572 u64 len = bbio->bio.bi_iter.bi_size;
2573 struct btrfs_ordered_extent *new;
2576 /* Must always be called for the beginning of an ordered extent. */
2577 if (WARN_ON_ONCE(start != ordered->disk_bytenr))
2580 /* No need to split if the ordered extent covers the entire bio. */
2581 if (ordered->disk_num_bytes == len) {
2582 refcount_inc(&ordered->refs);
2583 bbio->ordered = ordered;
2588 * Don't split the extent_map for NOCOW extents, as we're writing into
2589 * a pre-existing one.
2591 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
2592 ret = split_extent_map(bbio->inode, bbio->file_offset,
2593 ordered->num_bytes, len,
2594 ordered->disk_bytenr);
2599 new = btrfs_split_ordered_extent(ordered, len);
2601 return PTR_ERR(new);
2602 bbio->ordered = new;
2607 * Given a list of ordered sums, record them in the inode. This happens
2608 * at IO completion time based on sums calculated at bio submission time.
2610 static int add_pending_csums(struct btrfs_trans_handle *trans,
2611 struct list_head *list)
2613 struct btrfs_ordered_sum *sum;
2614 struct btrfs_root *csum_root = NULL;
2617 list_for_each_entry(sum, list, list) {
2618 trans->adding_csums = true;
2620 csum_root = btrfs_csum_root(trans->fs_info,
2622 ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2623 trans->adding_csums = false;
2630 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2633 struct extent_state **cached_state)
2635 u64 search_start = start;
2636 const u64 end = start + len - 1;
2638 while (search_start < end) {
2639 const u64 search_len = end - search_start + 1;
2640 struct extent_map *em;
2644 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
2648 if (em->block_start != EXTENT_MAP_HOLE)
2652 if (em->start < search_start)
2653 em_len -= search_start - em->start;
2654 if (em_len > search_len)
2655 em_len = search_len;
2657 ret = set_extent_bit(&inode->io_tree, search_start,
2658 search_start + em_len - 1,
2659 EXTENT_DELALLOC_NEW, cached_state);
2661 search_start = extent_map_end(em);
2662 free_extent_map(em);
2669 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2670 unsigned int extra_bits,
2671 struct extent_state **cached_state)
2673 WARN_ON(PAGE_ALIGNED(end));
2675 if (start >= i_size_read(&inode->vfs_inode) &&
2676 !(inode->flags & BTRFS_INODE_PREALLOC)) {
2678 * There can't be any extents following eof in this case so just
2679 * set the delalloc new bit for the range directly.
2681 extra_bits |= EXTENT_DELALLOC_NEW;
2685 ret = btrfs_find_new_delalloc_bytes(inode, start,
2692 return set_extent_bit(&inode->io_tree, start, end,
2693 EXTENT_DELALLOC | extra_bits, cached_state);
2696 /* see btrfs_writepage_cow_fixup for details on why this is required */
2697 struct btrfs_writepage_fixup {
2699 struct btrfs_inode *inode;
2700 struct btrfs_work work;
2703 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2705 struct btrfs_writepage_fixup *fixup =
2706 container_of(work, struct btrfs_writepage_fixup, work);
2707 struct btrfs_ordered_extent *ordered;
2708 struct extent_state *cached_state = NULL;
2709 struct extent_changeset *data_reserved = NULL;
2710 struct page *page = fixup->page;
2711 struct btrfs_inode *inode = fixup->inode;
2712 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2713 u64 page_start = page_offset(page);
2714 u64 page_end = page_offset(page) + PAGE_SIZE - 1;
2716 bool free_delalloc_space = true;
2719 * This is similar to page_mkwrite, we need to reserve the space before
2720 * we take the page lock.
2722 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2728 * Before we queued this fixup, we took a reference on the page.
2729 * page->mapping may go NULL, but it shouldn't be moved to a different address space.
2732 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2734 * Unfortunately this is a little tricky, either
2736 * 1) We got here and our page had already been dealt with and
2737 * we reserved our space, thus ret == 0, so we need to just
2738 * drop our space reservation and bail. This can happen the
2739 * first time we come into the fixup worker, or could happen
2740 * while waiting for the ordered extent.
2741 * 2) Our page was already dealt with, but we happened to get an
2742 * ENOSPC above from the btrfs_delalloc_reserve_space. In
2743 * this case we obviously don't have anything to release, but
2744 * because the page was already dealt with we don't want to
2745 * mark the page with an error, so make sure we're resetting
2746 * ret to 0. This is why we have this check _before_ the ret
2747 * check, because we do not want to have a surprise ENOSPC
2748 * when the page was already properly dealt with.
2751 btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2752 btrfs_delalloc_release_space(inode, data_reserved,
2753 page_start, PAGE_SIZE,
2761 * We can't mess with the page state unless it is locked, so now that
2762 * it is locked bail if we failed to make our space reservation.
2767 lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2769 /* already ordered? We're done */
2770 if (PageOrdered(page))
2773 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2775 unlock_extent(&inode->io_tree, page_start, page_end,
2778 btrfs_start_ordered_extent(ordered);
2779 btrfs_put_ordered_extent(ordered);
2783 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2789 * Everything went as planned, we're now the owner of a dirty page with
2790 * delayed allocation bits set and space reserved for our COW destination.
2793 * The page was dirty when we started, nothing should have cleaned it.
2795 BUG_ON(!PageDirty(page));
2796 free_delalloc_space = false;
2798 btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2799 if (free_delalloc_space)
2800 btrfs_delalloc_release_space(inode, data_reserved, page_start,
2802 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2806 * We hit ENOSPC or other errors. Update the mapping and page
2807 * to reflect the errors and clean the page.
2809 mapping_set_error(page->mapping, ret);
2810 btrfs_mark_ordered_io_finished(inode, page, page_start,
2812 btrfs_page_clear_uptodate(fs_info, page, page_start, PAGE_SIZE);
2813 clear_page_dirty_for_io(page);
2815 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
2819 extent_changeset_free(data_reserved);
2821 * As a precaution, do a delayed iput in case it would be the last iput
2822 * that could need flushing space. Recursing back to the fixup worker would deadlock.
2825 btrfs_add_delayed_iput(inode);
2829 * There are a few paths in the higher layers of the kernel that directly
2830 * set the page dirty bit without asking the filesystem if it is a
2831 * good idea. This causes problems because we want to make sure COW
2832 * properly happens and the data=ordered rules are followed.
2834 * In our case any range that doesn't have the ORDERED bit set
2835 * hasn't been properly set up for IO. We kick off an async process
2836 * to fix it up. The async helper will wait for ordered extents, set
2837 * the delalloc bit and make it safe to write the page.
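 *
 * The async helper (btrfs_writepage_fixup_worker above) reserves
 * delalloc space, locks the page, waits out any ordered extent still
 * covering it and re-marks the range delalloc, or cleans the page up
 * if the reservation fails.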
2839 int btrfs_writepage_cow_fixup(struct page *page)
2841 struct inode *inode = page->mapping->host;
2842 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2843 struct btrfs_writepage_fixup *fixup;
2845 /* This page has ordered extent covering it already */
2846 if (PageOrdered(page))
2850 * PageChecked is set below when we create a fixup worker for this page,
2851 * don't try to create another one if we're already PageChecked()
2853 * The extent_io writepage code will redirty the page if we send back EAGAIN.
2856 if (PageChecked(page))
2859 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2864 * We are already holding a reference to this inode from
2865 * write_cache_pages. We need to hold it because the space reservation
2866 * takes place outside of the page lock, and we can't trust
2867 * page->mapping outside of the page lock.
2870 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
2872 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
2874 fixup->inode = BTRFS_I(inode);
2875 btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2880 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2881 struct btrfs_inode *inode, u64 file_pos,
2882 struct btrfs_file_extent_item *stack_fi,
2883 const bool update_inode_bytes,
2884 u64 qgroup_reserved)
2886 struct btrfs_root *root = inode->root;
2887 const u64 sectorsize = root->fs_info->sectorsize;
2888 struct btrfs_path *path;
2889 struct extent_buffer *leaf;
2890 struct btrfs_key ins;
2891 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2892 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2893 u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2894 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2895 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2896 struct btrfs_drop_extents_args drop_args = { 0 };
2899 path = btrfs_alloc_path();
2904 * we may be replacing one extent in the tree with another.
2905 * The new extent is pinned in the extent map, and we don't want
2906 * to drop it from the cache until it is completely in the btree.
2908 * So, tell btrfs_drop_extents to leave this extent in the cache.
2909 * The caller is expected to unpin it and allow it to be merged with the rest of the extents in the tree.
2912 drop_args.path = path;
2913 drop_args.start = file_pos;
2914 drop_args.end = file_pos + num_bytes;
2915 drop_args.replace_extent = true;
2916 drop_args.extent_item_size = sizeof(*stack_fi);
2917 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2921 if (!drop_args.extent_inserted) {
2922 ins.objectid = btrfs_ino(inode);
2923 ins.offset = file_pos;
2924 ins.type = BTRFS_EXTENT_DATA_KEY;
2926 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2931 leaf = path->nodes[0];
2932 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2933 write_extent_buffer(leaf, stack_fi,
2934 btrfs_item_ptr_offset(leaf, path->slots[0]),
2935 sizeof(struct btrfs_file_extent_item));
2937 btrfs_mark_buffer_dirty(leaf);
2938 btrfs_release_path(path);
2941 * If we dropped an inline extent here, we know the range it covered
2942 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2943 * number of bytes only for that range containing the inline extent.
2944 * The remainder of the range will be processed when clearing the
2945 * EXTENT_DELALLOC_NEW bit through the ordered extent completion.
2947 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2948 u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2950 inline_size = drop_args.bytes_found - inline_size;
2951 btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2952 drop_args.bytes_found -= inline_size;
2953 num_bytes -= sectorsize;
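		/*
		 * Illustration, assuming a 4K sector size and 6K of inline
		 * bytes found: inline_size becomes the 2K unaligned tail,
		 * the inode gains one full sector (4K) and drops those 2K,
		 * and bytes_found/num_bytes are trimmed accordingly.
		 */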
2956 if (update_inode_bytes)
2957 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2959 ins.objectid = disk_bytenr;
2960 ins.offset = disk_num_bytes;
2961 ins.type = BTRFS_EXTENT_ITEM_KEY;
2963 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2967 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2969 qgroup_reserved, &ins);
2971 btrfs_free_path(path);
2976 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2979 struct btrfs_block_group *cache;
2981 cache = btrfs_lookup_block_group(fs_info, start);
2984 spin_lock(&cache->lock);
2985 cache->delalloc_bytes -= len;
2986 spin_unlock(&cache->lock);
2988 btrfs_put_block_group(cache);
2991 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
2992 struct btrfs_ordered_extent *oe)
2994 struct btrfs_file_extent_item stack_fi;
2995 bool update_inode_bytes;
2996 u64 num_bytes = oe->num_bytes;
2997 u64 ram_bytes = oe->ram_bytes;
2999 memset(&stack_fi, 0, sizeof(stack_fi));
3000 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3001 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3002 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3003 oe->disk_num_bytes);
3004 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3005 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
3006 num_bytes = oe->truncated_len;
3007 ram_bytes = num_bytes;
3009 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3010 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3011 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3012 /* Encryption and other encoding is reserved and all 0 */
3015 * For delalloc, when completing an ordered extent we update the inode's
3016 * bytes when clearing the range in the inode's io tree, so pass false
3017 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3018 * except if the ordered extent was truncated.
3020 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3021 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3022 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3024 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
3025 oe->file_offset, &stack_fi,
3026 update_inode_bytes, oe->qgroup_rsv);
3030 * As ordered data IO finishes, this gets called so we can finish
3031 * an ordered extent if the range of bytes in the file it covers is fully written.
3034 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3036 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
3037 struct btrfs_root *root = inode->root;
3038 struct btrfs_fs_info *fs_info = root->fs_info;
3039 struct btrfs_trans_handle *trans = NULL;
3040 struct extent_io_tree *io_tree = &inode->io_tree;
3041 struct extent_state *cached_state = NULL;
3043 int compress_type = 0;
3045 u64 logical_len = ordered_extent->num_bytes;
3046 bool freespace_inode;
3047 bool truncated = false;
3048 bool clear_reserved_extent = true;
3049 unsigned int clear_bits = EXTENT_DEFRAG;
3051 start = ordered_extent->file_offset;
3052 end = start + ordered_extent->num_bytes - 1;
3054 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3055 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3056 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3057 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3058 clear_bits |= EXTENT_DELALLOC_NEW;
3060 freespace_inode = btrfs_is_free_space_inode(inode);
3061 if (!freespace_inode)
3062 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3064 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3069 if (btrfs_is_zoned(fs_info))
3070 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3071 ordered_extent->disk_num_bytes);
3073 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3075 logical_len = ordered_extent->truncated_len;
3076 /* Truncated the entire extent, don't bother adding */
3081 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3082 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3084 btrfs_inode_safe_disk_i_size_write(inode, 0);
3085 if (freespace_inode)
3086 trans = btrfs_join_transaction_spacecache(root);
3088 trans = btrfs_join_transaction(root);
3089 if (IS_ERR(trans)) {
3090 ret = PTR_ERR(trans);
3094 trans->block_rsv = &inode->block_rsv;
3095 ret = btrfs_update_inode_fallback(trans, root, inode);
3096 if (ret) /* -ENOMEM or corruption */
3097 btrfs_abort_transaction(trans, ret);
3101 clear_bits |= EXTENT_LOCKED;
3102 lock_extent(io_tree, start, end, &cached_state);
3104 if (freespace_inode)
3105 trans = btrfs_join_transaction_spacecache(root);
3107 trans = btrfs_join_transaction(root);
3108 if (IS_ERR(trans)) {
3109 ret = PTR_ERR(trans);
3114 trans->block_rsv = &inode->block_rsv;
3116 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3117 compress_type = ordered_extent->compress_type;
3118 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3119 BUG_ON(compress_type);
3120 ret = btrfs_mark_extent_written(trans, inode,
3121 ordered_extent->file_offset,
3122 ordered_extent->file_offset +
3124 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3125 ordered_extent->disk_num_bytes);
3127 BUG_ON(root == fs_info->tree_root);
3128 ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3130 clear_reserved_extent = false;
3131 btrfs_release_delalloc_bytes(fs_info,
3132 ordered_extent->disk_bytenr,
3133 ordered_extent->disk_num_bytes);
3136 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
3137 ordered_extent->num_bytes, trans->transid);
3139 btrfs_abort_transaction(trans, ret);
3143 ret = add_pending_csums(trans, &ordered_extent->list);
3145 btrfs_abort_transaction(trans, ret);
3150 * If this is a new delalloc range, clear its new delalloc flag to
3151 * update the inode's number of bytes. This must be done before
3152 * updating the inode item.
3154 if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3155 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3156 clear_extent_bit(&inode->io_tree, start, end,
3157 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3160 btrfs_inode_safe_disk_i_size_write(inode, 0);
3161 ret = btrfs_update_inode_fallback(trans, root, inode);
3162 if (ret) { /* -ENOMEM or corruption */
3163 btrfs_abort_transaction(trans, ret);
3168 clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3172 btrfs_end_transaction(trans);
3174 if (ret || truncated) {
3175 u64 unwritten_start = start;
3178 * If we failed to finish this ordered extent for any reason we
3179 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3180 * extent, and mark the inode with the error if it wasn't
3181 * already set. Any error during writeback would have already
3182 * set the mapping error, so we need to set it if we're the ones
3183 * marking this ordered extent as failed.
3185 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3186 &ordered_extent->flags))
3187 mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3190 unwritten_start += logical_len;
3191 clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3193 /* Drop extent maps for the part of the extent we didn't write. */
3194 btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
3197 * If the ordered extent had an IOERR or something else went
3198 * wrong we need to return the space for this ordered extent
3199 * back to the allocator. We only free the extent in the
3200 * truncated case if we didn't write out the extent at all.
3202 * If we made it past insert_reserved_file_extent before we
3203 * errored out then we don't need to do this as the accounting
3204 * has already been done.
3206 if ((ret || !logical_len) &&
3207 clear_reserved_extent &&
3208 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3209 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3211 * Discard the range before returning it back to the free space.
3214 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3215 btrfs_discard_extent(fs_info,
3216 ordered_extent->disk_bytenr,
3217 ordered_extent->disk_num_bytes,
3219 btrfs_free_reserved_extent(fs_info,
3220 ordered_extent->disk_bytenr,
3221 ordered_extent->disk_num_bytes, 1);
3223 * Actually free the qgroup rsv which was released when
3224 * the ordered extent was created.
3226 btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
3227 ordered_extent->qgroup_rsv,
3228 BTRFS_QGROUP_RSV_DATA);
3233 * This needs to be done to make sure anybody waiting knows we are done
3234 * updating everything for this ordered extent.
3236 btrfs_remove_ordered_extent(inode, ordered_extent);
3239 btrfs_put_ordered_extent(ordered_extent);
3240 /* once for the tree */
3241 btrfs_put_ordered_extent(ordered_extent);
3246 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3248 if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
3249 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
3250 btrfs_finish_ordered_zoned(ordered);
3251 return btrfs_finish_one_ordered(ordered);
3255 * Verify the checksum for a single sector without any extra actions that depend
3256 * on the type of I/O.
3258 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3259 u32 pgoff, u8 *csum, const u8 * const csum_expected)
3261 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3264 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3266 shash->tfm = fs_info->csum_shash;
3268 kaddr = kmap_local_page(page) + pgoff;
3269 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3270 kunmap_local(kaddr);
3272 if (memcmp(csum, csum_expected, fs_info->csum_size))
3278 * Verify the checksum of a single data sector.
3280 * @bbio: btrfs_io_bio which contains the csum
3281 * @dev: device the sector is on
3282 * @bio_offset: offset to the beginning of the bio (in bytes)
3283 * @bv: bio_vec to check
3285 * Check if the checksum on a data block is valid. When a checksum mismatch is
3286 * detected, report the error and fill the corrupted range with zero.
3288 * Return %true if the sector is ok or had no checksum to start with, else %false.
3290 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3291 u32 bio_offset, struct bio_vec *bv)
3293 struct btrfs_inode *inode = bbio->inode;
3294 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3295 u64 file_offset = bbio->file_offset + bio_offset;
3296 u64 end = file_offset + bv->bv_len - 1;
3298 u8 csum[BTRFS_CSUM_SIZE];
3300 ASSERT(bv->bv_len == fs_info->sectorsize);
3305 if (btrfs_is_data_reloc_root(inode->root) &&
3306 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3308 /* Skip the range without csum for data reloc inode */
3309 clear_extent_bits(&inode->io_tree, file_offset, end,
3314 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3316 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
3322 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3325 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3331 * btrfs_add_delayed_iput - perform a delayed iput on @inode
3333 * @inode: The inode we want to perform iput on
3335 * This function uses the generic vfs_inode::i_count to track whether we should
3336 * just decrement it (in case it's > 1) or if this is the last iput then link
3337 * the inode to the delayed iput machinery. Delayed iputs are processed at
3338 * transaction commit time/superblock commit/cleaner kthread.
3340 void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3342 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3343 unsigned long flags;
3345 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3348 atomic_inc(&fs_info->nr_delayed_iputs);
3350 * Need to be irq safe here because we can be called from either an irq
3351 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq context.
3354 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3355 ASSERT(list_empty(&inode->delayed_iput));
3356 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3357 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3358 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3359 wake_up_process(fs_info->cleaner_kthread);
3362 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3363 struct btrfs_inode *inode)
3365 list_del_init(&inode->delayed_iput);
3366 spin_unlock_irq(&fs_info->delayed_iput_lock);
3367 iput(&inode->vfs_inode);
3368 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3369 wake_up(&fs_info->delayed_iputs_wait);
3370 spin_lock_irq(&fs_info->delayed_iput_lock);
3373 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3374 struct btrfs_inode *inode)
3376 if (!list_empty(&inode->delayed_iput)) {
3377 spin_lock_irq(&fs_info->delayed_iput_lock);
3378 if (!list_empty(&inode->delayed_iput))
3379 run_delayed_iput_locked(fs_info, inode);
3380 spin_unlock_irq(&fs_info->delayed_iput_lock);
3384 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3387 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3388 * calls btrfs_add_delayed_iput() and that needs to lock
3389 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3390 * prevent a deadlock.
3392 spin_lock_irq(&fs_info->delayed_iput_lock);
3393 while (!list_empty(&fs_info->delayed_iputs)) {
3394 struct btrfs_inode *inode;
3396 inode = list_first_entry(&fs_info->delayed_iputs,
3397 struct btrfs_inode, delayed_iput);
3398 run_delayed_iput_locked(fs_info, inode);
3399 if (need_resched()) {
3400 spin_unlock_irq(&fs_info->delayed_iput_lock);
3402 spin_lock_irq(&fs_info->delayed_iput_lock);
3405 spin_unlock_irq(&fs_info->delayed_iput_lock);
3409 * Wait for all delayed iputs to be flushed.
3411 * @fs_info: the filesystem
3413 * This will wait on any delayed iputs that are currently running with KILLABLE
3414 * set. Once they are all done running we will return, unless we are killed in
3415 * which case we return EINTR. This helps in user operations like fallocate etc
3416 * that might get blocked on the iputs.
3418 * Return EINTR if we were killed, 0 if nothing's pending
3420 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3422 int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3423 atomic_read(&fs_info->nr_delayed_iputs) == 0);
3430 * This creates an orphan entry for the given inode in case something goes wrong
3431 * in the middle of an unlink.
3433 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3434 struct btrfs_inode *inode)
3438 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3439 if (ret && ret != -EEXIST) {
3440 btrfs_abort_transaction(trans, ret);
3448 * We have done the delete so we can go ahead and remove the orphan item for
3449 * this particular inode.
3451 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3452 struct btrfs_inode *inode)
3454 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3458 * This cleans up any orphans that may be left on the list from the last use of this root.
3461 int btrfs_orphan_cleanup(struct btrfs_root *root)
3463 struct btrfs_fs_info *fs_info = root->fs_info;
3464 struct btrfs_path *path;
3465 struct extent_buffer *leaf;
3466 struct btrfs_key key, found_key;
3467 struct btrfs_trans_handle *trans;
3468 struct inode *inode;
3469 u64 last_objectid = 0;
3470 int ret = 0, nr_unlink = 0;
3472 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3475 path = btrfs_alloc_path();
3480 path->reada = READA_BACK;
3482 key.objectid = BTRFS_ORPHAN_OBJECTID;
3483 key.type = BTRFS_ORPHAN_ITEM_KEY;
3484 key.offset = (u64)-1;
3487 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3492 * ret == 0 means we found what we were searching for, which is
3493 * weird, but possible, so only screw with the path if we didn't
3494 * find the key and see if we have stuff that matches
3498 if (path->slots[0] == 0)
3503 /* pull out the item */
3504 leaf = path->nodes[0];
3505 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3507 /* make sure the item matches what we want */
3508 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3510 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3513 /* release the path since we're done with it */
3514 btrfs_release_path(path);
3517 * This is basically btrfs_lookup, without the crossing-root
3518 * part. We store the inode number in the offset of the
3519 * orphan item.
3522 if (found_key.offset == last_objectid) {
3524 "Error removing orphan entry, stopping orphan cleanup");
3529 last_objectid = found_key.offset;
3531 found_key.objectid = found_key.offset;
3532 found_key.type = BTRFS_INODE_ITEM_KEY;
3533 found_key.offset = 0;
3534 inode = btrfs_iget(fs_info->sb, last_objectid, root);
3535 if (IS_ERR(inode)) {
3536 ret = PTR_ERR(inode);
3542 if (!inode && root == fs_info->tree_root) {
3543 struct btrfs_root *dead_root;
3544 int is_dead_root = 0;
3547 * This is an orphan in the tree root. Currently these
3548 * could come from 2 sources:
3549 * a) a root (snapshot/subvolume) deletion in progress
3550 * b) a free space cache inode
3551 * We need to distinguish those two, as the orphan item
3552 * for a root must not get deleted before the deletion
3553 * of the snapshot/subvolume's tree completes.
3555 * btrfs_find_orphan_roots() ran before us, which has
3556 * found all deleted roots and loaded them into
3557 * fs_info->fs_roots_radix. So here we can find if an
3558 * orphan item corresponds to a deleted root by looking
3559 * up the root from that radix tree.
3562 spin_lock(&fs_info->fs_roots_radix_lock);
3563 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3564 (unsigned long)found_key.objectid);
3565 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3567 spin_unlock(&fs_info->fs_roots_radix_lock);
3570 /* prevent this orphan from being found again */
3571 key.offset = found_key.objectid - 1;
3578 * If we have an inode with links, there are a couple of
3581 * 1. We were halfway through creating fsverity metadata for the
3582 * file. In that case, the orphan item represents incomplete
3583 * fsverity metadata which must be cleaned up with
3584 * btrfs_drop_verity_items and deleting the orphan item.
3586 * 2. Old kernels (before v3.12) used to create an
3587 * orphan item for truncate indicating that there were possibly
3588 * extent items past i_size that needed to be deleted. In v3.12,
3589 * truncate was changed to update i_size in sync with the extent
3590 * items, but the (useless) orphan item was still created. Since
3591 * v4.18, we don't create the orphan item for truncate at all.
3593 * So, this item could mean that we need to do a truncate, but
3594 * only if this filesystem was last used on a pre-v3.12 kernel
3595 * and was not cleanly unmounted. The odds of that are quite
3596 * slim, and it's a pain to do the truncate now, so just delete
3599 * It's also possible that this orphan item was supposed to be
3600 * deleted but wasn't. The inode number may have been reused,
3601 * but either way, we can delete the orphan item.
3603 if (!inode || inode->i_nlink) {
3605 ret = btrfs_drop_verity_items(BTRFS_I(inode));
3611 trans = btrfs_start_transaction(root, 1);
3612 if (IS_ERR(trans)) {
3613 ret = PTR_ERR(trans);
3616 btrfs_debug(fs_info, "auto deleting %Lu",
3617 found_key.objectid);
3618 ret = btrfs_del_orphan_item(trans, root,
3619 found_key.objectid);
3620 btrfs_end_transaction(trans);
3628 /* this will do delete_inode and everything for us */
3631 /* release the path since we're done with it */
3632 btrfs_release_path(path);
3634 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3635 trans = btrfs_join_transaction(root);
3637 btrfs_end_transaction(trans);
3641 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3645 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3646 btrfs_free_path(path);
3651 * Very simple check to peek ahead in the leaf looking for xattrs. If we
3652 * don't find any xattrs, we know there can't be any ACLs.
3654 * slot is the slot the inode is in, objectid is the objectid of the inode.
3656 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3657 int slot, u64 objectid,
3658 int *first_xattr_slot)
3660 u32 nritems = btrfs_header_nritems(leaf);
3661 struct btrfs_key found_key;
3662 static u64 xattr_access = 0;
3663 static u64 xattr_default = 0;
3666 if (!xattr_access) {
3667 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3668 strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3669 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3670 strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
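	/*
	 * An xattr item's key offset is the hash of its name, so the two
	 * hashes computed above let us detect the POSIX ACL xattrs by
	 * comparing key offsets alone.
	 */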
3674 *first_xattr_slot = -1;
3675 while (slot < nritems) {
3676 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3678 /* we found a different objectid, there must not be acls */
3679 if (found_key.objectid != objectid)
3682 /* we found an xattr, assume we've got an acl */
3683 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3684 if (*first_xattr_slot == -1)
3685 *first_xattr_slot = slot;
3686 if (found_key.offset == xattr_access ||
3687 found_key.offset == xattr_default)
3692 * we found a key greater than an xattr key, there can't
3693 * be any acls later on
3695 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3702 * it goes inode, inode backrefs, xattrs, extents,
3703 * so if there are a ton of hard links to an inode there can
3704 * be a lot of backrefs. Don't waste time searching too hard,
3705 * this is just an optimization
3710 /* we hit the end of the leaf before we found an xattr or
3711 * something larger than an xattr. We have to assume the inode has ACLs.
3714 if (*first_xattr_slot == -1)
3715 *first_xattr_slot = slot;
3720 * read an inode from the btree into the in-memory inode
3722 static int btrfs_read_locked_inode(struct inode *inode,
3723 struct btrfs_path *in_path)
3725 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3726 struct btrfs_path *path = in_path;
3727 struct extent_buffer *leaf;
3728 struct btrfs_inode_item *inode_item;
3729 struct btrfs_root *root = BTRFS_I(inode)->root;
3730 struct btrfs_key location;
3735 bool filled = false;
3736 int first_xattr_slot;
3738 ret = btrfs_fill_inode(inode, &rdev);
3743 path = btrfs_alloc_path();
3748 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3750 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3752 if (path != in_path)
3753 btrfs_free_path(path);
3757 leaf = path->nodes[0];
3762 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3763 struct btrfs_inode_item);
3764 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3765 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3766 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3767 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3768 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3769 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3770 round_up(i_size_read(inode), fs_info->sectorsize));
3772 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3773 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3775 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3776 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3778 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3779 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3781 BTRFS_I(inode)->i_otime.tv_sec =
3782 btrfs_timespec_sec(leaf, &inode_item->otime);
3783 BTRFS_I(inode)->i_otime.tv_nsec =
3784 btrfs_timespec_nsec(leaf, &inode_item->otime);
3786 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3787 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3788 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3790 inode_set_iversion_queried(inode,
3791 btrfs_inode_sequence(leaf, inode_item));
3792 inode->i_generation = BTRFS_I(inode)->generation;
3794 rdev = btrfs_inode_rdev(leaf, inode_item);
3796 BTRFS_I(inode)->index_cnt = (u64)-1;
3797 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
3798 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
3802 * If we were modified in the current generation and evicted from memory
3803 * and then re-read we need to do a full sync since we don't have any
3804 * idea about which extents were modified before we were evicted from memory.
3807 * This is required for both inode re-read from disk and delayed inode
3808 * in delayed_nodes_tree.
3810 if (BTRFS_I(inode)->last_trans == fs_info->generation)
3811 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3812 &BTRFS_I(inode)->runtime_flags);
3815 * We don't persist the id of the transaction where an unlink operation
3816 * against the inode was last made. So here we assume the inode might
3817 * have been evicted, and therefore the exact value of last_unlink_trans
3818 * lost, and set it to last_trans to avoid metadata inconsistencies
3819 * between the inode and its parent if the inode is fsync'ed and the log
3820 * replayed. For example, in the scenario:
3823 * ln mydir/foo mydir/bar
3826 * echo 2 > /proc/sys/vm/drop_caches # evicts inode
3827 * xfs_io -c fsync mydir/foo
3829 * mount fs, triggers fsync log replay
3831 * We must make sure that when we fsync our inode foo we also log its
3832 * parent inode, otherwise after log replay the parent still has the
3833 * dentry with the "bar" name but our inode foo has a link count of 1
3834 * and doesn't have an inode ref with the name "bar" anymore.
3836 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3837 * but it guarantees correctness at the expense of occasional full
3838 * transaction commits on fsync if our inode is a directory, or if our
3839 * inode is not a directory, logging its parent unnecessarily.
3841 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3844 * Same logic as for last_unlink_trans. We don't persist the generation
3845 * of the last transaction where this inode was used for a reflink
3846 * operation, so after eviction and reloading the inode we must be
3847 * pessimistic and assume the last transaction that modified the inode.
3849 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3852 if (inode->i_nlink != 1 ||
3853 path->slots[0] >= btrfs_header_nritems(leaf))
3856 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3857 if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3860 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3861 if (location.type == BTRFS_INODE_REF_KEY) {
3862 struct btrfs_inode_ref *ref;
3864 ref = (struct btrfs_inode_ref *)ptr;
3865 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3866 } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3867 struct btrfs_inode_extref *extref;
3869 extref = (struct btrfs_inode_extref *)ptr;
3870 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3875 * try to precache a NULL acl entry for files that don't have
3876 * any xattrs or acls
3878 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3879 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3880 if (first_xattr_slot != -1) {
3881 path->slots[0] = first_xattr_slot;
3882 ret = btrfs_load_inode_props(inode, path);
3885 "error loading props for ino %llu (root %llu): %d",
3886 btrfs_ino(BTRFS_I(inode)),
3887 root->root_key.objectid, ret);
3889 if (path != in_path)
3890 btrfs_free_path(path);
3893 cache_no_acl(inode);
3895 switch (inode->i_mode & S_IFMT) {
3897 inode->i_mapping->a_ops = &btrfs_aops;
3898 inode->i_fop = &btrfs_file_operations;
3899 inode->i_op = &btrfs_file_inode_operations;
3902 inode->i_fop = &btrfs_dir_file_operations;
3903 inode->i_op = &btrfs_dir_inode_operations;
3906 inode->i_op = &btrfs_symlink_inode_operations;
3907 inode_nohighmem(inode);
3908 inode->i_mapping->a_ops = &btrfs_aops;
3911 inode->i_op = &btrfs_special_inode_operations;
3912 init_special_inode(inode, inode->i_mode, rdev);
3916 btrfs_sync_inode_flags_to_i_flags(inode);
3921 * given a leaf and an inode, copy the inode fields into the leaf
3923 static void fill_inode_item(struct btrfs_trans_handle *trans,
3924 struct extent_buffer *leaf,
3925 struct btrfs_inode_item *item,
3926 struct inode *inode)
3928 struct btrfs_map_token token;
3931 btrfs_init_map_token(&token, leaf);
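	/*
	 * The map token caches the current extent buffer mapping so the
	 * run of setters below avoids re-translating the offset for every
	 * field.
	 */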
3933 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3934 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3935 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
3936 btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3937 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3939 btrfs_set_token_timespec_sec(&token, &item->atime,
3940 inode->i_atime.tv_sec);
3941 btrfs_set_token_timespec_nsec(&token, &item->atime,
3942 inode->i_atime.tv_nsec);
3944 btrfs_set_token_timespec_sec(&token, &item->mtime,
3945 inode->i_mtime.tv_sec);
3946 btrfs_set_token_timespec_nsec(&token, &item->mtime,
3947 inode->i_mtime.tv_nsec);
3949 btrfs_set_token_timespec_sec(&token, &item->ctime,
3950 inode->i_ctime.tv_sec);
3951 btrfs_set_token_timespec_nsec(&token, &item->ctime,
3952 inode->i_ctime.tv_nsec);
3954 btrfs_set_token_timespec_sec(&token, &item->otime,
3955 BTRFS_I(inode)->i_otime.tv_sec);
3956 btrfs_set_token_timespec_nsec(&token, &item->otime,
3957 BTRFS_I(inode)->i_otime.tv_nsec);
3959 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
3960 btrfs_set_token_inode_generation(&token, item,
3961 BTRFS_I(inode)->generation);
3962 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
3963 btrfs_set_token_inode_transid(&token, item, trans->transid);
3964 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
3965 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
3966 BTRFS_I(inode)->ro_flags);
3967 btrfs_set_token_inode_flags(&token, item, flags);
3968 btrfs_set_token_inode_block_group(&token, item, 0);
3972 * copy everything in the in-memory inode into the btree.
3974 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3975 struct btrfs_root *root,
3976 struct btrfs_inode *inode)
3978 struct btrfs_inode_item *inode_item;
3979 struct btrfs_path *path;
3980 struct extent_buffer *leaf;
3983 path = btrfs_alloc_path();
3987 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
3994 leaf = path->nodes[0];
3995 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3996 struct btrfs_inode_item);
3998 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
3999 btrfs_mark_buffer_dirty(leaf);
4000 btrfs_set_inode_last_trans(trans, inode);
4003 btrfs_free_path(path);
4008 * copy everything in the in-memory inode into the btree.
4010 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
4011 struct btrfs_root *root,
4012 struct btrfs_inode *inode)
4014 struct btrfs_fs_info *fs_info = root->fs_info;
4018 * If the inode is a free space inode, we can deadlock during commit
4019 * if we put it into the delayed code.
4021 * The data relocation inode should also be directly updated without delay.
4024 if (!btrfs_is_free_space_inode(inode)
4025 && !btrfs_is_data_reloc_root(root)
4026 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4027 btrfs_update_root_times(trans, root);
4029 ret = btrfs_delayed_update_inode(trans, root, inode);
4031 btrfs_set_inode_last_trans(trans, inode);
4035 return btrfs_update_inode_item(trans, root, inode);
4038 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4039 struct btrfs_root *root, struct btrfs_inode *inode)
4043 ret = btrfs_update_inode(trans, root, inode);
4045 return btrfs_update_inode_item(trans, root, inode);
4050 * unlink helper that gets used here in inode.c and in the tree logging
4051 * recovery code. It removes a link in a directory with a given name, and
4052 * also drops the back refs in the inode to the directory
4054 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4055 struct btrfs_inode *dir,
4056 struct btrfs_inode *inode,
4057 const struct fscrypt_str *name,
4058 struct btrfs_rename_ctx *rename_ctx)
4060 struct btrfs_root *root = dir->root;
4061 struct btrfs_fs_info *fs_info = root->fs_info;
4062 struct btrfs_path *path;
4064 struct btrfs_dir_item *di;
4066 u64 ino = btrfs_ino(inode);
4067 u64 dir_ino = btrfs_ino(dir);
4069 path = btrfs_alloc_path();
4075 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4076 if (IS_ERR_OR_NULL(di)) {
4077 ret = di ? PTR_ERR(di) : -ENOENT;
4080 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4083 btrfs_release_path(path);
4086 * If we don't have a dir index, we have to look up the inode ref to
4087 * get it; and since we will have looked up the inode ref anyway, we
4088 * remove it directly instead of doing a delayed deletion.
4090 * But if we do have a dir index, there is no need to search for the
4091 * inode ref. Since the inode ref is close to the inode item, it is
4092 * better to delay its deletion and do it when we update the inode
4093 * item.
4095 if (inode->dir_index) {
4096 ret = btrfs_delayed_delete_inode_ref(inode);
4098 index = inode->dir_index;
4103 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4106 "failed to delete reference to %.*s, inode %llu parent %llu",
4107 name->len, name->name, ino, dir_ino);
4108 btrfs_abort_transaction(trans, ret);
4113 rename_ctx->index = index;
4115 ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4117 btrfs_abort_transaction(trans, ret);
4122 * If we are in a rename context, we don't need to update anything in the
4123 * log. That will be done later during the rename by btrfs_log_new_name().
4124 * Besides that, doing it here would only cause extra unnecessary btree
4125 * operations on the log tree, increasing latency for applications.
4128 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
4129 btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
4133 * If we have a pending delayed iput we could end up with the final iput
4134 * being run in btrfs-cleaner context. If we have enough of these built
4135 * up we can end up burning a lot of time in btrfs-cleaner without any
4136 * way to throttle the unlinks. Since we're currently holding a ref on
4137 * the inode we can run the delayed iput here without any issues as the
4138 * final iput won't be done until after we drop the ref we're currently
4139 * holding.
4141 btrfs_run_delayed_iput(fs_info, inode);
4143 btrfs_free_path(path);
4147 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4148 inode_inc_iversion(&inode->vfs_inode);
4149 inode_inc_iversion(&dir->vfs_inode);
4150 inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4151 dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime;
4152 dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime;
4153 ret = btrfs_update_inode(trans, root, dir);
4158 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4159 struct btrfs_inode *dir, struct btrfs_inode *inode,
4160 const struct fscrypt_str *name)
4164 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4166 drop_nlink(&inode->vfs_inode);
4167 ret = btrfs_update_inode(trans, inode->root, inode);
4173 * Helper to start a transaction for unlink and rmdir.
4175 * unlink and rmdir are special in btrfs: they do not always free space, so
4176 * if we cannot make our reservations the normal way, try to see if there is
4177 * plenty of slack room in the global reserve to migrate; otherwise we cannot
4178 * allow the unlink to occur.
4180 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4182 struct btrfs_root *root = dir->root;
4184 return btrfs_start_transaction_fallback_global_rsv(root,
4185 BTRFS_UNLINK_METADATA_UNITS);
4188 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4190 struct btrfs_trans_handle *trans;
4191 struct inode *inode = d_inode(dentry);
4193 struct fscrypt_name fname;
4195 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4199 /* This needs to handle no-key deletions later on */
4201 trans = __unlink_start_trans(BTRFS_I(dir));
4202 if (IS_ERR(trans)) {
4203 ret = PTR_ERR(trans);
4207 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4210 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4215 if (inode->i_nlink == 0) {
4216 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4222 btrfs_end_transaction(trans);
4223 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4225 fscrypt_free_filename(&fname);
4229 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4230 struct btrfs_inode *dir, struct dentry *dentry)
4232 struct btrfs_root *root = dir->root;
4233 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4234 struct btrfs_path *path;
4235 struct extent_buffer *leaf;
4236 struct btrfs_dir_item *di;
4237 struct btrfs_key key;
4241 u64 dir_ino = btrfs_ino(dir);
4242 struct fscrypt_name fname;
4244 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4248 /* This needs to handle no-key deletions later on */
4250 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4251 objectid = inode->root->root_key.objectid;
4252 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4253 objectid = inode->location.objectid;
4256 fscrypt_free_filename(&fname);
4260 path = btrfs_alloc_path();
4266 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4267 &fname.disk_name, -1);
4268 if (IS_ERR_OR_NULL(di)) {
4269 ret = di ? PTR_ERR(di) : -ENOENT;
4273 leaf = path->nodes[0];
4274 btrfs_dir_item_key_to_cpu(leaf, di, &key);
4275 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4276 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4278 btrfs_abort_transaction(trans, ret);
4281 btrfs_release_path(path);
4284 * This is a placeholder inode for a subvolume we didn't have a
4285 * reference to at the time of the snapshot creation. In the meantime
4286 * we could have renamed the real subvol link into our snapshot, so
4287 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4288 * Instead simply lookup the dir_index_item for this entry so we can
4289 * remove it. Otherwise we know we have a ref to the root and we can
4290 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4292 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4293 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4294 if (IS_ERR_OR_NULL(di)) {
4299 btrfs_abort_transaction(trans, ret);
4303 leaf = path->nodes[0];
4304 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4306 btrfs_release_path(path);
4308 ret = btrfs_del_root_ref(trans, objectid,
4309 root->root_key.objectid, dir_ino,
4310 &index, &fname.disk_name);
4312 btrfs_abort_transaction(trans, ret);
4317 ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4319 btrfs_abort_transaction(trans, ret);
4323 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4324 inode_inc_iversion(&dir->vfs_inode);
4325 dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode);
4326 dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime;
4327 ret = btrfs_update_inode_fallback(trans, root, dir);
4329 btrfs_abort_transaction(trans, ret);
4331 btrfs_free_path(path);
4332 fscrypt_free_filename(&fname);
4337 * Helper to check if the subvolume references other subvolumes or if it's
4338 * the default subvolume.
4340 static noinline int may_destroy_subvol(struct btrfs_root *root)
4342 struct btrfs_fs_info *fs_info = root->fs_info;
4343 struct btrfs_path *path;
4344 struct btrfs_dir_item *di;
4345 struct btrfs_key key;
4346 struct fscrypt_str name = FSTR_INIT("default", 7);
4350 path = btrfs_alloc_path();
4354 /* Make sure this root isn't set as the default subvol */
4355 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4356 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4358 if (di && !IS_ERR(di)) {
4359 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4360 if (key.objectid == root->root_key.objectid) {
4363 "deleting default subvolume %llu is not allowed",
4367 btrfs_release_path(path);
4370 key.objectid = root->root_key.objectid;
4371 key.type = BTRFS_ROOT_REF_KEY;
4372 key.offset = (u64)-1;
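/*
 * ROOT_REF items are keyed (parent root id, BTRFS_ROOT_REF_KEY, child root
 * id). Searching with offset (u64)-1 lands just past the last possible ref
 * for this root, so inspecting the previous slot tells us whether it still
 * references any child subvolume.
 */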
4374 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4380 if (path->slots[0] > 0) {
4382 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4383 if (key.objectid == root->root_key.objectid &&
4384 key.type == BTRFS_ROOT_REF_KEY)
4388 btrfs_free_path(path);
4392 /* Delete all dentries for inodes belonging to the root */
4393 static void btrfs_prune_dentries(struct btrfs_root *root)
4395 struct btrfs_fs_info *fs_info = root->fs_info;
4396 struct rb_node *node;
4397 struct rb_node *prev;
4398 struct btrfs_inode *entry;
4399 struct inode *inode;
4402 if (!BTRFS_FS_ERROR(fs_info))
4403 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4405 spin_lock(&root->inode_lock);
4407 node = root->inode_tree.rb_node;
4411 entry = rb_entry(node, struct btrfs_inode, rb_node);
4413 if (objectid < btrfs_ino(entry))
4414 node = node->rb_left;
4415 else if (objectid > btrfs_ino(entry))
4416 node = node->rb_right;
4422 entry = rb_entry(prev, struct btrfs_inode, rb_node);
4423 if (objectid <= btrfs_ino(entry)) {
4427 prev = rb_next(prev);
4431 entry = rb_entry(node, struct btrfs_inode, rb_node);
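/*
 * Advance the resume cursor past this inode before dropping the lock below;
 * the rb-tree may change while we operate on the inode, so every pass
 * re-searches from 'objectid'.
 */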
4432 objectid = btrfs_ino(entry) + 1;
4433 inode = igrab(&entry->vfs_inode);
4435 spin_unlock(&root->inode_lock);
4436 if (atomic_read(&inode->i_count) > 1)
4437 d_prune_aliases(inode);
4439 * btrfs_drop_inode will have it removed from the inode
4440 * cache when its usage count hits zero.
4444 spin_lock(&root->inode_lock);
4448 if (cond_resched_lock(&root->inode_lock))
4451 node = rb_next(node);
4453 spin_unlock(&root->inode_lock);
4456 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4458 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4459 struct btrfs_root *root = dir->root;
4460 struct inode *inode = d_inode(dentry);
4461 struct btrfs_root *dest = BTRFS_I(inode)->root;
4462 struct btrfs_trans_handle *trans;
4463 struct btrfs_block_rsv block_rsv;
4468 * Don't allow deleting a subvolume with a send in progress. This is
4469 * inside the inode lock so the error handling that has to drop the bit
4470 * again is not run concurrently.
4472 spin_lock(&dest->root_item_lock);
4473 if (dest->send_in_progress) {
4474 spin_unlock(&dest->root_item_lock);
4476 "attempt to delete subvolume %llu during send",
4477 dest->root_key.objectid);
4480 if (atomic_read(&dest->nr_swapfiles)) {
4481 spin_unlock(&dest->root_item_lock);
4483 "attempt to delete subvolume %llu with active swapfile",
4484 root->root_key.objectid);
4487 root_flags = btrfs_root_flags(&dest->root_item);
4488 btrfs_set_root_flags(&dest->root_item,
4489 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4490 spin_unlock(&dest->root_item_lock);
4492 down_write(&fs_info->subvol_sem);
4494 ret = may_destroy_subvol(dest);
4498 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4500 * One for dir inode,
4501 * two for dir entries,
4502 * two for root ref/backref.
4504 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4508 trans = btrfs_start_transaction(root, 0);
4509 if (IS_ERR(trans)) {
4510 ret = PTR_ERR(trans);
4513 trans->block_rsv = &block_rsv;
4514 trans->bytes_reserved = block_rsv.size;
4516 btrfs_record_snapshot_destroy(trans, dir);
4518 ret = btrfs_unlink_subvol(trans, dir, dentry);
4520 btrfs_abort_transaction(trans, ret);
4524 ret = btrfs_record_root_in_trans(trans, dest);
4526 btrfs_abort_transaction(trans, ret);
4530 memset(&dest->root_item.drop_progress, 0,
4531 sizeof(dest->root_item.drop_progress));
4532 btrfs_set_root_drop_level(&dest->root_item, 0);
4533 btrfs_set_root_refs(&dest->root_item, 0);
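/*
 * A root refs count of 0 is what lets the cleaner thread eventually drop
 * the subvolume's tree; the orphan item inserted below allows that deletion
 * to restart after a crash.
 */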
4535 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4536 ret = btrfs_insert_orphan_item(trans,
4538 dest->root_key.objectid);
4540 btrfs_abort_transaction(trans, ret);
4545 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4546 BTRFS_UUID_KEY_SUBVOL,
4547 dest->root_key.objectid);
4548 if (ret && ret != -ENOENT) {
4549 btrfs_abort_transaction(trans, ret);
4552 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4553 ret = btrfs_uuid_tree_remove(trans,
4554 dest->root_item.received_uuid,
4555 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4556 dest->root_key.objectid);
4557 if (ret && ret != -ENOENT) {
4558 btrfs_abort_transaction(trans, ret);
4563 free_anon_bdev(dest->anon_dev);
4566 trans->block_rsv = NULL;
4567 trans->bytes_reserved = 0;
4568 ret = btrfs_end_transaction(trans);
4569 inode->i_flags |= S_DEAD;
4571 btrfs_subvolume_release_metadata(root, &block_rsv);
4573 up_write(&fs_info->subvol_sem);
4575 spin_lock(&dest->root_item_lock);
4576 root_flags = btrfs_root_flags(&dest->root_item);
4577 btrfs_set_root_flags(&dest->root_item,
4578 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4579 spin_unlock(&dest->root_item_lock);
4581 d_invalidate(dentry);
4582 btrfs_prune_dentries(dest);
4583 ASSERT(dest->send_in_progress == 0);
4589 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4591 struct inode *inode = d_inode(dentry);
4592 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4594 struct btrfs_trans_handle *trans;
4595 u64 last_unlink_trans;
4596 struct fscrypt_name fname;
4598 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4600 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4601 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4603 "extent tree v2 doesn't support snapshot deletion yet");
4606 return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
4609 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4613 /* This needs to handle no-key deletions later on */
4615 trans = __unlink_start_trans(BTRFS_I(dir));
4616 if (IS_ERR(trans)) {
4617 err = PTR_ERR(trans);
4621 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4622 err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
4626 err = btrfs_orphan_add(trans, BTRFS_I(inode));
4630 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4632 /* now the directory is empty */
4633 err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4636 btrfs_i_size_write(BTRFS_I(inode), 0);
4638 * Propagate the last_unlink_trans value of the deleted dir to
4639 * its parent directory. This is to prevent an unrecoverable
4640 * log tree in the case we do something like this:
4641 * 1) create dir foo
4642 * 2) create snapshot under dir foo
4643 * 3) delete the snapshot
4644 * 4) rmdir foo
4645 * 5) mkdir foo
4646 * 6) fsync foo or some file inside foo
4648 if (last_unlink_trans >= trans->transid)
4649 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4652 btrfs_end_transaction(trans);
4654 btrfs_btree_balance_dirty(fs_info);
4655 fscrypt_free_filename(&fname);
4661 * btrfs_truncate_block - read, zero a chunk and write a block
4662 * @inode: inode that we're zeroing
4663 * @from: the offset to start zeroing
4664 * @len: the length to zero, 0 to zero the entire range relative to the
4665 *	byte from @from
4666 * @front: zero up to the offset instead of from the offset on
4668 * This will find the block for the "from" offset, COW it, and zero the
4669 * part we want zeroed. This is used with truncate and hole punching.
4671 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4674 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4675 struct address_space *mapping = inode->vfs_inode.i_mapping;
4676 struct extent_io_tree *io_tree = &inode->io_tree;
4677 struct btrfs_ordered_extent *ordered;
4678 struct extent_state *cached_state = NULL;
4679 struct extent_changeset *data_reserved = NULL;
4680 bool only_release_metadata = false;
4681 u32 blocksize = fs_info->sectorsize;
4682 pgoff_t index = from >> PAGE_SHIFT;
4683 unsigned offset = from & (blocksize - 1);
4685 gfp_t mask = btrfs_alloc_write_mask(mapping);
4686 size_t write_bytes = blocksize;
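/*
 * If the range to zero is already block aligned there is no partial block
 * to read-modify-write, so there is nothing to do here.
 */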
4691 if (IS_ALIGNED(offset, blocksize) &&
4692 (!len || IS_ALIGNED(len, blocksize)))
4695 block_start = round_down(from, blocksize);
4696 block_end = block_start + blocksize - 1;
4698 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4701 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
4702 /* For nocow case, no need to reserve data space */
4703 only_release_metadata = true;
4708 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4710 if (!only_release_metadata)
4711 btrfs_free_reserved_data_space(inode, data_reserved,
4712 block_start, blocksize);
4716 page = find_or_create_page(mapping, index, mask);
4718 btrfs_delalloc_release_space(inode, data_reserved, block_start,
4720 btrfs_delalloc_release_extents(inode, blocksize);
4725 if (!PageUptodate(page)) {
4726 ret = btrfs_read_folio(NULL, page_folio(page));
4728 if (page->mapping != mapping) {
4733 if (!PageUptodate(page)) {
4740 * We unlock the page after the io is completed and then re-lock it
4741 * above. release_folio() could have come in between that and cleared
4742 * PagePrivate(), but left the page in the mapping. Set the page mapped
4743 * here to make sure it's properly set for the subpage stuff.
4745 ret = set_page_extent_mapped(page);
4749 wait_on_page_writeback(page);
4751 lock_extent(io_tree, block_start, block_end, &cached_state);
4753 ordered = btrfs_lookup_ordered_extent(inode, block_start);
4755 unlock_extent(io_tree, block_start, block_end, &cached_state);
4758 btrfs_start_ordered_extent(ordered);
4759 btrfs_put_ordered_extent(ordered);
4763 clear_extent_bit(&inode->io_tree, block_start, block_end,
4764 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4767 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4770 unlock_extent(io_tree, block_start, block_end, &cached_state);
4774 if (offset != blocksize) {
4776 len = blocksize - offset;
4778 memzero_page(page, (block_start - page_offset(page)),
4781 memzero_page(page, (block_start - page_offset(page)) + offset,
4784 btrfs_page_clear_checked(fs_info, page, block_start,
4785 block_end + 1 - block_start);
4786 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
4787 unlock_extent(io_tree, block_start, block_end, &cached_state);
4789 if (only_release_metadata)
4790 set_extent_bit(&inode->io_tree, block_start, block_end,
4791 EXTENT_NORESERVE, NULL);
4795 if (only_release_metadata)
4796 btrfs_delalloc_release_metadata(inode, blocksize, true);
4798 btrfs_delalloc_release_space(inode, data_reserved,
4799 block_start, blocksize, true);
4801 btrfs_delalloc_release_extents(inode, blocksize);
4805 if (only_release_metadata)
4806 btrfs_check_nocow_unlock(inode);
4807 extent_changeset_free(data_reserved);
4811 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
4812 u64 offset, u64 len)
4814 struct btrfs_fs_info *fs_info = root->fs_info;
4815 struct btrfs_trans_handle *trans;
4816 struct btrfs_drop_extents_args drop_args = { 0 };
4820 * If NO_HOLES is enabled, we don't need to do anything.
4821 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
4822 * or btrfs_update_inode() will be called, which guarantees that the next
4823 * fsync will know this inode was changed and needs to be logged.
4825 if (btrfs_fs_incompat(fs_info, NO_HOLES))
4829 * 1 - for the one we're dropping
4830 * 1 - for the one we're adding
4831 * 1 - for updating the inode.
4833 trans = btrfs_start_transaction(root, 3);
4835 return PTR_ERR(trans);
4837 drop_args.start = offset;
4838 drop_args.end = offset + len;
4839 drop_args.drop_cache = true;
4841 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4843 btrfs_abort_transaction(trans, ret);
4844 btrfs_end_transaction(trans);
4848 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
4850 btrfs_abort_transaction(trans, ret);
4852 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4853 btrfs_update_inode(trans, root, inode);
4855 btrfs_end_transaction(trans);
4860 * This function puts in dummy file extents for the area we're creating a hole
4861 * for. So if we are truncating this file to a larger size we need to insert
4862 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4863 * the range between oldsize and size.
4865 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
4867 struct btrfs_root *root = inode->root;
4868 struct btrfs_fs_info *fs_info = root->fs_info;
4869 struct extent_io_tree *io_tree = &inode->io_tree;
4870 struct extent_map *em = NULL;
4871 struct extent_state *cached_state = NULL;
4872 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4873 u64 block_end = ALIGN(size, fs_info->sectorsize);
4880 * If our size started in the middle of a block we need to zero out the
4881 * rest of the block before we expand the i_size, otherwise we could
4882 * expose stale data.
4884 err = btrfs_truncate_block(inode, oldsize, 0, 0);
4888 if (size <= hole_start)
4891 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
4893 cur_offset = hole_start;
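/*
 * Walk the range one extent map at a time. For each gap that is not backed
 * by a prealloc extent, insert a file extent item describing a hole and
 * mirror it in the extent map tree, so later lookups see EXTENT_MAP_HOLE
 * instead of stale mappings.
 */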
4895 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4896 block_end - cur_offset);
4902 last_byte = min(extent_map_end(em), block_end);
4903 last_byte = ALIGN(last_byte, fs_info->sectorsize);
4904 hole_size = last_byte - cur_offset;
4906 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4907 struct extent_map *hole_em;
4909 err = maybe_insert_hole(root, inode, cur_offset,
4914 err = btrfs_inode_set_file_extent_range(inode,
4915 cur_offset, hole_size);
4919 hole_em = alloc_extent_map();
4921 btrfs_drop_extent_map_range(inode, cur_offset,
4922 cur_offset + hole_size - 1,
4924 btrfs_set_inode_full_sync(inode);
4927 hole_em->start = cur_offset;
4928 hole_em->len = hole_size;
4929 hole_em->orig_start = cur_offset;
4931 hole_em->block_start = EXTENT_MAP_HOLE;
4932 hole_em->block_len = 0;
4933 hole_em->orig_block_len = 0;
4934 hole_em->ram_bytes = hole_size;
4935 hole_em->compress_type = BTRFS_COMPRESS_NONE;
4936 hole_em->generation = fs_info->generation;
4938 err = btrfs_replace_extent_map_range(inode, hole_em, true);
4939 free_extent_map(hole_em);
4941 err = btrfs_inode_set_file_extent_range(inode,
4942 cur_offset, hole_size);
4947 free_extent_map(em);
4949 cur_offset = last_byte;
4950 if (cur_offset >= block_end)
4953 free_extent_map(em);
4954 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
4958 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4960 struct btrfs_root *root = BTRFS_I(inode)->root;
4961 struct btrfs_trans_handle *trans;
4962 loff_t oldsize = i_size_read(inode);
4963 loff_t newsize = attr->ia_size;
4964 int mask = attr->ia_valid;
4968 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4969 * special case where we need to update the times despite not having
4970 * these flags set. For all other operations the VFS set these flags
4971 * explicitly if it wants a timestamp update.
4973 if (newsize != oldsize) {
4974 inode_inc_iversion(inode);
4975 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
4976 inode->i_mtime = current_time(inode);
4977 inode->i_ctime = inode->i_mtime;
4981 if (newsize > oldsize) {
4983 * Don't do an expanding truncate while snapshotting is ongoing.
4984 * This is to ensure the snapshot captures a fully consistent
4985 * state of this file - if the snapshot captures this expanding
4986 * truncation, it must capture all writes that happened before
4989 btrfs_drew_write_lock(&root->snapshot_lock);
4990 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
4992 btrfs_drew_write_unlock(&root->snapshot_lock);
4996 trans = btrfs_start_transaction(root, 1);
4997 if (IS_ERR(trans)) {
4998 btrfs_drew_write_unlock(&root->snapshot_lock);
4999 return PTR_ERR(trans);
5002 i_size_write(inode, newsize);
5003 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5004 pagecache_isize_extended(inode, oldsize, newsize);
5005 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
5006 btrfs_drew_write_unlock(&root->snapshot_lock);
5007 btrfs_end_transaction(trans);
5009 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5011 if (btrfs_is_zoned(fs_info)) {
5012 ret = btrfs_wait_ordered_range(inode,
5013 ALIGN(newsize, fs_info->sectorsize),
5020 * We're truncating a file that used to have good data down to
5021 * zero. Make sure any new writes to the file get on disk
5025 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5026 &BTRFS_I(inode)->runtime_flags);
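/*
 * Shrinking truncate: update i_size and drop the page cache beyond it
 * first, then wait for in-flight direct IO, and only afterwards remove the
 * extent items past the new size.
 */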
5028 truncate_setsize(inode, newsize);
5030 inode_dio_wait(inode);
5032 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5033 if (ret && inode->i_nlink) {
5037 * Truncate failed, so fix up the in-memory size. We
5038 * adjusted disk_i_size down as we removed extents, so
5039 * wait for disk_i_size to be stable and then update the
5040 * in-memory size to match.
5042 err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5045 i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5052 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5055 struct inode *inode = d_inode(dentry);
5056 struct btrfs_root *root = BTRFS_I(inode)->root;
5059 if (btrfs_root_readonly(root))
5062 err = setattr_prepare(idmap, dentry, attr);
5066 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5067 err = btrfs_setsize(inode, attr);
5072 if (attr->ia_valid) {
5073 setattr_copy(idmap, inode, attr);
5074 inode_inc_iversion(inode);
5075 err = btrfs_dirty_inode(BTRFS_I(inode));
5077 if (!err && attr->ia_valid & ATTR_MODE)
5078 err = posix_acl_chmod(idmap, dentry, inode->i_mode);
5085 * While truncating the inode pages during eviction, we get the VFS
5086 * calling btrfs_invalidate_folio() against each folio of the inode. This
5087 * is slow because the calls to btrfs_invalidate_folio() result in a
5088 * huge amount of calls to lock_extent() and clear_extent_bit(),
5089 * which keep merging and splitting extent_state structures over and over,
5090 * wasting lots of time.
5092 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5093 * skip all those expensive operations on a per folio basis and do only
5094 * the ordered io finishing, while we release here the extent_map and
5095 * extent_state structures, without the excessive merging and splitting.
5097 static void evict_inode_truncate_pages(struct inode *inode)
5099 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5100 struct rb_node *node;
5102 ASSERT(inode->i_state & I_FREEING);
5103 truncate_inode_pages_final(&inode->i_data);
5105 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5108 * Keep looping until we have no more ranges in the io tree.
5109 * We can have ongoing bios started by readahead that have
5110 * their endio callback (extent_io.c:end_bio_extent_readpage)
5111 * still in progress (unlocked the pages in the bio but did not yet
5112 * unlocked the ranges in the io tree). Therefore this means some
5113 * ranges can still be locked and eviction started because before
5114 * submitting those bios, which are executed by a separate task (work
5115 * queue kthread), inode references (inode->i_count) were not taken
5116 * (which would be dropped in the end io callback of each bio).
5117 * Therefore here we effectively end up waiting for those bios and
5118 * anyone else holding locked ranges without having bumped the inode's
5119 * reference count - if we don't do it, when they access the inode's
5120 * io_tree to unlock a range it may be too late, leading to a
5121 * use-after-free issue.
5123 spin_lock(&io_tree->lock);
5124 while (!RB_EMPTY_ROOT(&io_tree->state)) {
5125 struct extent_state *state;
5126 struct extent_state *cached_state = NULL;
5129 unsigned state_flags;
5131 node = rb_first(&io_tree->state);
5132 state = rb_entry(node, struct extent_state, rb_node);
5133 start = state->start;
5135 state_flags = state->state;
5136 spin_unlock(&io_tree->lock);
5138 lock_extent(io_tree, start, end, &cached_state);
5141 * If still has DELALLOC flag, the extent didn't reach disk,
5142 * and its reserved space won't be freed by delayed_ref.
5143 * So we need to free its reserved space here.
5144 * (Refer to comment in btrfs_invalidate_folio, case 2)
5146 * Note, end is the bytenr of last byte, so we need + 1 here.
5148 if (state_flags & EXTENT_DELALLOC)
5149 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5152 clear_extent_bit(io_tree, start, end,
5153 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5157 spin_lock(&io_tree->lock);
5159 spin_unlock(&io_tree->lock);
5162 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5163 struct btrfs_block_rsv *rsv)
5165 struct btrfs_fs_info *fs_info = root->fs_info;
5166 struct btrfs_trans_handle *trans;
5167 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5171 * Eviction should be taking place at some place safe because of our
5172 * delayed iputs. However the normal flushing code will run delayed
5173 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5175 * We reserve the delayed_refs_extra here again because we can't use
5176 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5177 * above. We reserve our extra bit here because we generate a ton of
5178 * delayed refs activity by truncating.
5180 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5181 * if we fail to make this reservation we can re-try without the
5182 * delayed_refs_extra so we can make some forward progress.
5184 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5185 BTRFS_RESERVE_FLUSH_EVICT);
5187 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5188 BTRFS_RESERVE_FLUSH_EVICT);
5191 "could not allocate space for delete; will truncate on mount");
5192 return ERR_PTR(-ENOSPC);
5194 delayed_refs_extra = 0;
5197 trans = btrfs_join_transaction(root);
5201 if (delayed_refs_extra) {
5202 trans->block_rsv = &fs_info->trans_block_rsv;
5203 trans->bytes_reserved = delayed_refs_extra;
5204 btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5205 delayed_refs_extra, true);
5210 void btrfs_evict_inode(struct inode *inode)
5212 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5213 struct btrfs_trans_handle *trans;
5214 struct btrfs_root *root = BTRFS_I(inode)->root;
5215 struct btrfs_block_rsv *rsv = NULL;
5218 trace_btrfs_inode_evict(inode);
5221 fsverity_cleanup_inode(inode);
5226 evict_inode_truncate_pages(inode);
5228 if (inode->i_nlink &&
5229 ((btrfs_root_refs(&root->root_item) != 0 &&
5230 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5231 btrfs_is_free_space_inode(BTRFS_I(inode))))
5234 if (is_bad_inode(inode))
5237 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5240 if (inode->i_nlink > 0) {
5241 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5242 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5247 * This makes sure the inode item in the tree is uptodate and the space for
5248 * the inode update is released.
5250 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5255 * This drops any pending insert or delete operations we have for this
5256 * inode. We could have a delayed dir index deletion queued up, but
5257 * we're removing the inode completely so that'll be taken care of in
5258 * the truncate.
5260 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5262 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5265 rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5266 rsv->failfast = true;
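/*
 * The reservation covers a single tree operation; failfast makes the
 * truncate loop below see -EAGAIN instead of blocking on flushing, so the
 * transaction can be ended and the rsv refilled between iterations.
 */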
5268 btrfs_i_size_write(BTRFS_I(inode), 0);
5271 struct btrfs_truncate_control control = {
5272 .inode = BTRFS_I(inode),
5273 .ino = btrfs_ino(BTRFS_I(inode)),
5278 trans = evict_refill_and_join(root, rsv);
5282 trans->block_rsv = rsv;
5284 ret = btrfs_truncate_inode_items(trans, root, &control);
5285 trans->block_rsv = &fs_info->trans_block_rsv;
5286 btrfs_end_transaction(trans);
5288 * We have not added new delayed items for our inode after we
5289 * have flushed its delayed items, so no need to throttle on
5290 * delayed items. However we have modified extent buffers.
5292 btrfs_btree_balance_dirty_nodelay(fs_info);
5293 if (ret && ret != -ENOSPC && ret != -EAGAIN)
5300 * Errors here aren't a big deal, it just means we leave orphan items in
5301 * the tree. They will be cleaned up on the next mount. If the inode
5302 * number gets reused, cleanup deletes the orphan item without doing
5303 * anything, and unlink reuses the existing orphan item.
5305 * If it turns out that we are dropping too many of these, we might want
5306 * to add a mechanism for retrying these after a commit.
5308 trans = evict_refill_and_join(root, rsv);
5309 if (!IS_ERR(trans)) {
5310 trans->block_rsv = rsv;
5311 btrfs_orphan_del(trans, BTRFS_I(inode));
5312 trans->block_rsv = &fs_info->trans_block_rsv;
5313 btrfs_end_transaction(trans);
5317 btrfs_free_block_rsv(fs_info, rsv);
5319 * If we didn't successfully delete, the orphan item will still be in
5320 * the tree and we'll retry on the next mount. Again, we might also want
5321 * to retry these periodically in the future.
5323 btrfs_remove_delayed_node(BTRFS_I(inode));
5324 fsverity_cleanup_inode(inode);
5329 * Return the key found in the dir entry in the location pointer, fill @type
5330 * with BTRFS_FT_*, and return 0.
5332 * If no dir entries were found, returns -ENOENT.
5333 * If a corrupted location is found in the dir entry, returns -EUCLEAN.
5335 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5336 struct btrfs_key *location, u8 *type)
5338 struct btrfs_dir_item *di;
5339 struct btrfs_path *path;
5340 struct btrfs_root *root = dir->root;
5342 struct fscrypt_name fname;
5344 path = btrfs_alloc_path();
5348 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5352 * fscrypt_setup_filename() should never return a positive value, but
5353 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5357 /* This needs to handle no-key deletions later on */
5359 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5360 &fname.disk_name, 0);
5361 if (IS_ERR_OR_NULL(di)) {
5362 ret = di ? PTR_ERR(di) : -ENOENT;
5366 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5367 if (location->type != BTRFS_INODE_ITEM_KEY &&
5368 location->type != BTRFS_ROOT_ITEM_KEY) {
5370 btrfs_warn(root->fs_info,
5371 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5372 __func__, fname.disk_name.name, btrfs_ino(dir),
5373 location->objectid, location->type, location->offset);
5376 *type = btrfs_dir_ftype(path->nodes[0], di);
5378 fscrypt_free_filename(&fname);
5379 btrfs_free_path(path);
5384 * when we hit a tree root in a directory, the btrfs part of the inode
5385 * needs to be changed to reflect the root directory of the tree root. This
5386 * is kind of like crossing a mount point.
5388 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5389 struct btrfs_inode *dir,
5390 struct dentry *dentry,
5391 struct btrfs_key *location,
5392 struct btrfs_root **sub_root)
5394 struct btrfs_path *path;
5395 struct btrfs_root *new_root;
5396 struct btrfs_root_ref *ref;
5397 struct extent_buffer *leaf;
5398 struct btrfs_key key;
5401 struct fscrypt_name fname;
5403 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5407 path = btrfs_alloc_path();
5414 key.objectid = dir->root->root_key.objectid;
5415 key.type = BTRFS_ROOT_REF_KEY;
5416 key.offset = location->objectid;
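/*
 * Prove the dentry really refers to this subvolume before crossing into it:
 * a ROOT_REF keyed (parent tree id, BTRFS_ROOT_REF_KEY, subvol id) must
 * exist, and its dirid and name must match the directory entry.
 */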
5418 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5425 leaf = path->nodes[0];
5426 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5427 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5428 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5431 ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5432 (unsigned long)(ref + 1), fname.disk_name.len);
5436 btrfs_release_path(path);
5438 new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5439 if (IS_ERR(new_root)) {
5440 err = PTR_ERR(new_root);
5444 *sub_root = new_root;
5445 location->objectid = btrfs_root_dirid(&new_root->root_item);
5446 location->type = BTRFS_INODE_ITEM_KEY;
5447 location->offset = 0;
5450 btrfs_free_path(path);
5451 fscrypt_free_filename(&fname);
5455 static void inode_tree_add(struct btrfs_inode *inode)
5457 struct btrfs_root *root = inode->root;
5458 struct btrfs_inode *entry;
5460 struct rb_node *parent;
5461 struct rb_node *new = &inode->rb_node;
5462 u64 ino = btrfs_ino(inode);
5464 if (inode_unhashed(&inode->vfs_inode))
5467 spin_lock(&root->inode_lock);
5468 p = &root->inode_tree.rb_node;
5471 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5473 if (ino < btrfs_ino(entry))
5474 p = &parent->rb_left;
5475 else if (ino > btrfs_ino(entry))
5476 p = &parent->rb_right;
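/*
 * An inode with the same number is still in the tree but is being freed
 * (I_WILL_FREE/I_FREEING); take over its rb-tree slot rather than linking
 * a duplicate node.
 */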
5478 WARN_ON(!(entry->vfs_inode.i_state &
5479 (I_WILL_FREE | I_FREEING)));
5480 rb_replace_node(parent, new, &root->inode_tree);
5481 RB_CLEAR_NODE(parent);
5482 spin_unlock(&root->inode_lock);
5486 rb_link_node(new, parent, p);
5487 rb_insert_color(new, &root->inode_tree);
5488 spin_unlock(&root->inode_lock);
5491 static void inode_tree_del(struct btrfs_inode *inode)
5493 struct btrfs_root *root = inode->root;
5496 spin_lock(&root->inode_lock);
5497 if (!RB_EMPTY_NODE(&inode->rb_node)) {
5498 rb_erase(&inode->rb_node, &root->inode_tree);
5499 RB_CLEAR_NODE(&inode->rb_node);
5500 empty = RB_EMPTY_ROOT(&root->inode_tree);
5502 spin_unlock(&root->inode_lock);
5504 if (empty && btrfs_root_refs(&root->root_item) == 0) {
5505 spin_lock(&root->inode_lock);
5506 empty = RB_EMPTY_ROOT(&root->inode_tree);
5507 spin_unlock(&root->inode_lock);
5509 btrfs_add_dead_root(root);
5514 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5516 struct btrfs_iget_args *args = p;
5518 inode->i_ino = args->ino;
5519 BTRFS_I(inode)->location.objectid = args->ino;
5520 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5521 BTRFS_I(inode)->location.offset = 0;
5522 BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5523 BUG_ON(args->root && !BTRFS_I(inode)->root);
5525 if (args->root && args->root == args->root->fs_info->tree_root &&
5526 args->ino != BTRFS_BTREE_INODE_OBJECTID)
5527 set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5528 &BTRFS_I(inode)->runtime_flags);
5532 static int btrfs_find_actor(struct inode *inode, void *opaque)
5534 struct btrfs_iget_args *args = opaque;
5536 return args->ino == BTRFS_I(inode)->location.objectid &&
5537 args->root == BTRFS_I(inode)->root;
5540 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5541 struct btrfs_root *root)
5543 struct inode *inode;
5544 struct btrfs_iget_args args;
5545 unsigned long hashval = btrfs_inode_hash(ino, root);
5550 inode = iget5_locked(s, hashval, btrfs_find_actor,
5551 btrfs_init_locked_inode,
5557 * Get an inode object given its inode number and corresponding root.
5558 * Path can be preallocated to prevent recursing back to iget through
5559 * allocator. NULL is also valid but may require an additional allocation
5560 * later.
5562 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5563 struct btrfs_root *root, struct btrfs_path *path)
5565 struct inode *inode;
5567 inode = btrfs_iget_locked(s, ino, root);
5569 return ERR_PTR(-ENOMEM);
5571 if (inode->i_state & I_NEW) {
5574 ret = btrfs_read_locked_inode(inode, path);
5576 inode_tree_add(BTRFS_I(inode));
5577 unlock_new_inode(inode);
5581 * ret > 0 can come from btrfs_search_slot called by
5582 * btrfs_read_locked_inode, this means the inode item
5587 inode = ERR_PTR(ret);
5594 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5596 return btrfs_iget_path(s, ino, root, NULL);
5599 static struct inode *new_simple_dir(struct super_block *s,
5600 struct btrfs_key *key,
5601 struct btrfs_root *root)
5603 struct inode *inode = new_inode(s);
5606 return ERR_PTR(-ENOMEM);
5608 BTRFS_I(inode)->root = btrfs_grab_root(root);
5609 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5610 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5612 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5614 * We only need lookup, the rest is read-only and there's no inode
5615 * associated with the dentry
5617 inode->i_op = &simple_dir_inode_operations;
5618 inode->i_opflags &= ~IOP_XATTR;
5619 inode->i_fop = &simple_dir_operations;
5620 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5621 inode->i_mtime = current_time(inode);
5622 inode->i_atime = inode->i_mtime;
5623 inode->i_ctime = inode->i_mtime;
5624 BTRFS_I(inode)->i_otime = inode->i_mtime;
5629 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5630 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5631 static_assert(BTRFS_FT_DIR == FT_DIR);
5632 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5633 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5634 static_assert(BTRFS_FT_FIFO == FT_FIFO);
5635 static_assert(BTRFS_FT_SOCK == FT_SOCK);
5636 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5638 static inline u8 btrfs_inode_type(struct inode *inode)
5640 return fs_umode_to_ftype(inode->i_mode);
5643 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5645 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5646 struct inode *inode;
5647 struct btrfs_root *root = BTRFS_I(dir)->root;
5648 struct btrfs_root *sub_root = root;
5649 struct btrfs_key location;
5653 if (dentry->d_name.len > BTRFS_NAME_LEN)
5654 return ERR_PTR(-ENAMETOOLONG);
5656 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
5658 return ERR_PTR(ret);
5660 if (location.type == BTRFS_INODE_ITEM_KEY) {
5661 inode = btrfs_iget(dir->i_sb, location.objectid, root);
5665 /* Do extra check against inode mode with di_type */
5666 if (btrfs_inode_type(inode) != di_type) {
5668 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5669 inode->i_mode, btrfs_inode_type(inode),
5672 return ERR_PTR(-EUCLEAN);
5677 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
5678 &location, &sub_root);
5681 inode = ERR_PTR(ret);
5683 inode = new_simple_dir(dir->i_sb, &location, root);
5685 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5686 btrfs_put_root(sub_root);
5691 down_read(&fs_info->cleanup_work_sem);
5692 if (!sb_rdonly(inode->i_sb))
5693 ret = btrfs_orphan_cleanup(sub_root);
5694 up_read(&fs_info->cleanup_work_sem);
5697 inode = ERR_PTR(ret);
5704 static int btrfs_dentry_delete(const struct dentry *dentry)
5706 struct btrfs_root *root;
5707 struct inode *inode = d_inode(dentry);
5709 if (!inode && !IS_ROOT(dentry))
5710 inode = d_inode(dentry->d_parent);
5713 root = BTRFS_I(inode)->root;
5714 if (btrfs_root_refs(&root->root_item) == 0)
5717 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5723 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5726 struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5728 if (inode == ERR_PTR(-ENOENT))
5730 return d_splice_alias(inode, dentry);
5734 * Find the highest existing sequence number in a directory and then set the
5735 * in-memory index_cnt variable to the first free sequence number.
5737 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5739 struct btrfs_root *root = inode->root;
5740 struct btrfs_key key, found_key;
5741 struct btrfs_path *path;
5742 struct extent_buffer *leaf;
5745 key.objectid = btrfs_ino(inode);
5746 key.type = BTRFS_DIR_INDEX_KEY;
5747 key.offset = (u64)-1;
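/*
 * Search with the highest possible offset so the path lands just past the
 * last DIR_INDEX item; the previous slot then holds the highest index
 * currently in use.
 */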
5749 path = btrfs_alloc_path();
5753 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5756 /* FIXME: we should be able to handle this */
5761 if (path->slots[0] == 0) {
5762 inode->index_cnt = BTRFS_DIR_START_INDEX;
5768 leaf = path->nodes[0];
5769 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5771 if (found_key.objectid != btrfs_ino(inode) ||
5772 found_key.type != BTRFS_DIR_INDEX_KEY) {
5773 inode->index_cnt = BTRFS_DIR_START_INDEX;
5777 inode->index_cnt = found_key.offset + 1;
5779 btrfs_free_path(path);
5783 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
5785 if (dir->index_cnt == (u64)-1) {
5788 ret = btrfs_inode_delayed_dir_index_count(dir);
5790 ret = btrfs_set_inode_index_count(dir);
5796 *index = dir->index_cnt;
5802 * All this infrastructure exists because dir_emit can fault, and we are holding
5803 * the tree lock when doing readdir. For now just allocate a buffer and copy
5804 * our information into that, and then dir_emit from the buffer. This is
5805 * similar to what NFS does, only we don't keep the buffer around in pagecache
5806 * because I'm afraid I'll mess that up. Long term we need to make filldir do
5807 * copy_to_user_inatomic so we don't have to worry about page faulting under the
5808 * tree lock.
5810 static int btrfs_opendir(struct inode *inode, struct file *file)
5812 struct btrfs_file_private *private;
5816 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
5820 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5823 private->last_index = last_index;
5824 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5825 if (!private->filldir_buf) {
5829 file->private_data = private;
5840 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5843 struct dir_entry *entry = addr;
5844 char *name = (char *)(entry + 1);
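/*
 * Entries are packed back to back as [struct dir_entry][name bytes], so
 * each following entry is arbitrarily aligned; hence the get_unaligned()
 * accessors below.
 */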
5846 ctx->pos = get_unaligned(&entry->offset);
5847 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5848 get_unaligned(&entry->ino),
5849 get_unaligned(&entry->type)))
5851 addr += sizeof(struct dir_entry) +
5852 get_unaligned(&entry->name_len);
5858 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5860 struct inode *inode = file_inode(file);
5861 struct btrfs_root *root = BTRFS_I(inode)->root;
5862 struct btrfs_file_private *private = file->private_data;
5863 struct btrfs_dir_item *di;
5864 struct btrfs_key key;
5865 struct btrfs_key found_key;
5866 struct btrfs_path *path;
5868 struct list_head ins_list;
5869 struct list_head del_list;
5876 struct btrfs_key location;
5878 if (!dir_emit_dots(file, ctx))
5881 path = btrfs_alloc_path();
5885 addr = private->filldir_buf;
5886 path->reada = READA_FORWARD;
5888 INIT_LIST_HEAD(&ins_list);
5889 INIT_LIST_HEAD(&del_list);
5890 put = btrfs_readdir_get_delayed_items(inode, private->last_index,
5891 &ins_list, &del_list);
5894 key.type = BTRFS_DIR_INDEX_KEY;
5895 key.offset = ctx->pos;
5896 key.objectid = btrfs_ino(BTRFS_I(inode));
5898 btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5899 struct dir_entry *entry;
5900 struct extent_buffer *leaf = path->nodes[0];
5903 if (found_key.objectid != key.objectid)
5905 if (found_key.type != BTRFS_DIR_INDEX_KEY)
5907 if (found_key.offset < ctx->pos)
5909 if (found_key.offset > private->last_index)
5911 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5913 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5914 name_len = btrfs_dir_name_len(leaf, di);
5915 if ((total_len + sizeof(struct dir_entry) + name_len) >=
5917 btrfs_release_path(path);
5918 ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5921 addr = private->filldir_buf;
5927 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
5929 name_ptr = (char *)(entry + 1);
5930 read_extent_buffer(leaf, name_ptr,
5931 (unsigned long)(di + 1), name_len);
5932 put_unaligned(name_len, &entry->name_len);
5933 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
5934 btrfs_dir_item_key_to_cpu(leaf, di, &location);
5935 put_unaligned(location.objectid, &entry->ino);
5936 put_unaligned(found_key.offset, &entry->offset);
5938 addr += sizeof(struct dir_entry) + name_len;
5939 total_len += sizeof(struct dir_entry) + name_len;
5941 /* Catch error encountered during iteration */
5945 btrfs_release_path(path);
5947 ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5951 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5956 * Stop new entries from being returned after we return the last
5957 * entry.
5959 * New directory entries are assigned a strictly increasing
5960 * offset. This means that new entries created during readdir
5961 * are *guaranteed* to be seen in the future by that readdir.
5962 * This has broken buggy programs which operate on names as
5963 * they're returned by readdir. Until we re-use freed offsets
5964 * we have this hack to stop new entries from being returned
5965 * under the assumption that they'll never reach this huge
5968 * This is being careful not to overflow 32bit loff_t unless the
5969 * last entry requires it because doing so has broken 32bit apps
5970 * in the past.
5972 if (ctx->pos >= INT_MAX)
5973 ctx->pos = LLONG_MAX;
5980 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5981 btrfs_free_path(path);
5986 * This is somewhat expensive, updating the tree every time the
5987 * inode changes. But, it is most likely to find the inode in cache.
5988 * FIXME, needs more benchmarking...there are no reasons other than performance
5989 * to keep or drop this code.
5991 static int btrfs_dirty_inode(struct btrfs_inode *inode)
5993 struct btrfs_root *root = inode->root;
5994 struct btrfs_fs_info *fs_info = root->fs_info;
5995 struct btrfs_trans_handle *trans;
5998 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6001 trans = btrfs_join_transaction(root);
6003 return PTR_ERR(trans);
6005 ret = btrfs_update_inode(trans, root, inode);
6006 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
6007 /* whoops, lets try again with the full transaction */
6008 btrfs_end_transaction(trans);
6009 trans = btrfs_start_transaction(root, 1);
6011 return PTR_ERR(trans);
6013 ret = btrfs_update_inode(trans, root, inode);
6015 btrfs_end_transaction(trans);
6016 if (inode->delayed_node)
6017 btrfs_balance_delayed_items(fs_info);
6023 * This is a copy of file_update_time. We need this so we can return an
6024 * error on ENOSPC when updating the inode for file writes and mmap writes.
6026 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6029 struct btrfs_root *root = BTRFS_I(inode)->root;
6030 bool dirty = flags & ~S_VERSION;
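/*
 * S_VERSION alone does not force a dirty inode: inode_maybe_inc_iversion()
 * only bumps i_version (and makes us dirty) if the version was queried or
 * something else already requires an update.
 */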
6032 if (btrfs_root_readonly(root))
6035 if (flags & S_VERSION)
6036 dirty |= inode_maybe_inc_iversion(inode, dirty);
6037 if (flags & S_CTIME)
6038 inode->i_ctime = *now;
6039 if (flags & S_MTIME)
6040 inode->i_mtime = *now;
6041 if (flags & S_ATIME)
6042 inode->i_atime = *now;
6043 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
6047 * Helper to find a free sequence number in a given directory. The current
6048 * code is very simple; later versions will do smarter things in the btree.
6050 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6054 if (dir->index_cnt == (u64)-1) {
6055 ret = btrfs_inode_delayed_dir_index_count(dir);
6057 ret = btrfs_set_inode_index_count(dir);
6063 *index = dir->index_cnt;
6069 static int btrfs_insert_inode_locked(struct inode *inode)
6071 struct btrfs_iget_args args;
6073 args.ino = BTRFS_I(inode)->location.objectid;
6074 args.root = BTRFS_I(inode)->root;
6076 return insert_inode_locked4(inode,
6077 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6078 btrfs_find_actor, &args);
6081 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6082 unsigned int *trans_num_items)
6084 struct inode *dir = args->dir;
6085 struct inode *inode = args->inode;
6088 if (!args->orphan) {
6089 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6095 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6097 fscrypt_free_filename(&args->fname);
6101 /* 1 to add inode item */
6102 *trans_num_items = 1;
6103 /* 1 to add compression property */
6104 if (BTRFS_I(dir)->prop_compress)
6105 (*trans_num_items)++;
6106 /* 1 to add default ACL xattr */
6107 if (args->default_acl)
6108 (*trans_num_items)++;
6109 /* 1 to add access ACL xattr */
6111 (*trans_num_items)++;
6112 #ifdef CONFIG_SECURITY
6113 /* 1 to add LSM xattr */
6114 if (dir->i_security)
6115 (*trans_num_items)++;
6118 /* 1 to add orphan item */
6119 (*trans_num_items)++;
6122 * 1 to add dir item
6123 * 1 to add dir index
6124 * 1 to update parent inode item
6126 * No need for 1 unit for the inode ref item because it is
6127 * inserted in a batch together with the inode item at
6128 * btrfs_create_new_inode().
6130 *trans_num_items += 3;
6135 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6137 posix_acl_release(args->acl);
6138 posix_acl_release(args->default_acl);
6139 fscrypt_free_filename(&args->fname);
6143 * Inherit flags from the parent inode.
6145 * Currently only the compression flags and the cow flags are inherited.
6147 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6153 if (flags & BTRFS_INODE_NOCOMPRESS) {
6154 inode->flags &= ~BTRFS_INODE_COMPRESS;
6155 inode->flags |= BTRFS_INODE_NOCOMPRESS;
6156 } else if (flags & BTRFS_INODE_COMPRESS) {
6157 inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6158 inode->flags |= BTRFS_INODE_COMPRESS;
6161 if (flags & BTRFS_INODE_NODATACOW) {
6162 inode->flags |= BTRFS_INODE_NODATACOW;
6163 if (S_ISREG(inode->vfs_inode.i_mode))
6164 inode->flags |= BTRFS_INODE_NODATASUM;
6167 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
6170 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6171 struct btrfs_new_inode_args *args)
6173 struct inode *dir = args->dir;
6174 struct inode *inode = args->inode;
6175 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6176 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6177 struct btrfs_root *root;
6178 struct btrfs_inode_item *inode_item;
6179 struct btrfs_key *location;
6180 struct btrfs_path *path;
6182 struct btrfs_inode_ref *ref;
6183 struct btrfs_key key[2];
6185 struct btrfs_item_batch batch;
6189 path = btrfs_alloc_path();
6194 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6195 root = BTRFS_I(inode)->root;
6197 ret = btrfs_get_free_objectid(root, &objectid);
6200 inode->i_ino = objectid;
6204 * For O_TMPFILE, set the link count to 0 so that, after this point, we
6205 * fill in an inode item with the correct link count.
6207 set_nlink(inode, 0);
6209 trace_btrfs_inode_request(dir);
6211 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6215 /* index_cnt is ignored for everything but a dir. */
6216 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6217 BTRFS_I(inode)->generation = trans->transid;
6218 inode->i_generation = BTRFS_I(inode)->generation;
6221 * Subvolumes don't inherit flags from their parent directory.
6222 * Originally this was probably by accident, but we probably can't
6223 * change it now without compatibility issues.
6226 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6228 if (S_ISREG(inode->i_mode)) {
6229 if (btrfs_test_opt(fs_info, NODATASUM))
6230 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6231 if (btrfs_test_opt(fs_info, NODATACOW))
6232 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6233 BTRFS_INODE_NODATASUM;
6236 location = &BTRFS_I(inode)->location;
6237 location->objectid = objectid;
6238 location->offset = 0;
6239 location->type = BTRFS_INODE_ITEM_KEY;
6241 ret = btrfs_insert_inode_locked(inode);
6244 BTRFS_I(dir)->index_cnt--;
6249 * We could have gotten an inode number from somebody who was fsynced
6250 * and then removed in this same transaction, so let's just set full
6251 * sync since it will be a full sync anyway and this will blow away the
6252 * old info in the log.
6254 btrfs_set_inode_full_sync(BTRFS_I(inode));
6256 key[0].objectid = objectid;
6257 key[0].type = BTRFS_INODE_ITEM_KEY;
6260 sizes[0] = sizeof(struct btrfs_inode_item);
6262 if (!args->orphan) {
6264 * Start new inodes with an inode_ref. This is slightly more
6265 * efficient for small numbers of hard links since they will
6266 * be packed into one item. Extended refs will kick in if we
6267 * add more hard links than can fit in the ref item.
6269 key[1].objectid = objectid;
6270 key[1].type = BTRFS_INODE_REF_KEY;
6272 key[1].offset = objectid;
6273 sizes[1] = 2 + sizeof(*ref);
6275 key[1].offset = btrfs_ino(BTRFS_I(dir));
6276 sizes[1] = name->len + sizeof(*ref);
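/*
 * The 2-byte case above is the subvolume root: its inode ref is named ".."
 * and points at itself, while normal inodes store the real name and
 * reference the parent directory's inode.
 */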
6280 batch.keys = &key[0];
6281 batch.data_sizes = &sizes[0];
6282 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6283 batch.nr = args->orphan ? 1 : 2;
6284 ret = btrfs_insert_empty_items(trans, root, path, &batch);
6286 btrfs_abort_transaction(trans, ret);
6290 inode->i_mtime = current_time(inode);
6291 inode->i_atime = inode->i_mtime;
6292 inode->i_ctime = inode->i_mtime;
6293 BTRFS_I(inode)->i_otime = inode->i_mtime;
6296 * We're going to fill the inode item now, so at this point the inode
6297 * must be fully initialized.
6300 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6301 struct btrfs_inode_item);
6302 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6303 sizeof(*inode_item));
6304 fill_inode_item(trans, path->nodes[0], inode_item, inode);
6306 if (!args->orphan) {
6307 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6308 struct btrfs_inode_ref);
6309 ptr = (unsigned long)(ref + 1);
6311 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6312 btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6313 write_extent_buffer(path->nodes[0], "..", ptr, 2);
6315 btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6317 btrfs_set_inode_ref_index(path->nodes[0], ref,
6318 BTRFS_I(inode)->dir_index);
6319 write_extent_buffer(path->nodes[0], name->name, ptr,
6324 btrfs_mark_buffer_dirty(path->nodes[0]);
6326 * We don't need the path anymore, plus inheriting properties, adding
6327 * ACLs, security xattrs, orphan item or adding the link, will result in
6328 * allocating yet another path. So just free our path.
6330 btrfs_free_path(path);
6334 struct inode *parent;
6337 * Subvolumes inherit properties from their parent subvolume,
6338 * not the directory they were created in.
6340 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6341 BTRFS_I(dir)->root);
6342 if (IS_ERR(parent)) {
6343 ret = PTR_ERR(parent);
6345 ret = btrfs_inode_inherit_props(trans, inode, parent);
6349 ret = btrfs_inode_inherit_props(trans, inode, dir);
6353 "error inheriting props for ino %llu (root %llu): %d",
6354 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6359 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6362 if (!args->subvol) {
6363 ret = btrfs_init_inode_security(trans, args);
6365 btrfs_abort_transaction(trans, ret);
6370 inode_tree_add(BTRFS_I(inode));
6372 trace_btrfs_inode_new(inode);
6373 btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6375 btrfs_update_root_times(trans, root);
6378 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6380 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6381 0, BTRFS_I(inode)->dir_index);
6384 btrfs_abort_transaction(trans, ret);
6392 * discard_new_inode() calls iput(), but the caller owns the reference
6396 discard_new_inode(inode);
6398 btrfs_free_path(path);
/*
 * Utility function to add 'inode' into 'parent_inode' with a given name and
 * a given sequence number.
 * If 'add_backref' is true, also insert a backref from the inode to the
 * parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name,
					     ino, parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name->len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
		struct timespec64 now = current_time(&parent_inode->vfs_inode);

		parent_inode->vfs_inode.i_mtime = now;
		parent_inode->vfs_inode.i_ctime = now;
	}
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
					  &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
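
/*
 * Common helper for the create paths (mknod, create, mkdir): it sets up the
 * btrfs_new_inode_args, lets btrfs_new_inode_prepare() compute the number of
 * transaction items (security xattrs, ACLs, etc. included), then creates the
 * inode in a single transaction and instantiates the dentry on success.
 */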
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
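
/* Create a special file (device node, fifo or socket) below @dir. */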
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}
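
/* Create a regular file below @dir and wire up its file and address space ops. */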
static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct fscrypt_name fname;
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto fail;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     &fname.disk_name, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	fscrypt_free_filename(&fname);
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
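
/* Create a directory below @dir, reusing the common creation helper above. */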
static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}

static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes. That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */

	if (max_size < PAGE_SIZE)
		memzero_page(page, max_size, PAGE_SIZE - max_size);
	kfree(tmp);
	return ret;
}
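
/*
 * Copy the data of an inline extent into @page, going through
 * uncompress_inline() when the extent is compressed and zeroing the part of
 * the page past the end of the inline data.
 */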
static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
			      struct page *page)
{
	struct btrfs_file_extent_item *fi;
	void *kaddr;
	size_t copy_size;

	if (!page || PageUptodate(page))
		return 0;

	ASSERT(page_offset(page) == 0);

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
		return uncompress_inline(path, page, fi);

	copy_size = min_t(u64, PAGE_SIZE,
			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
	kaddr = kmap_local_page(page);
	read_extent_buffer(path->nodes[0], kaddr,
			   btrfs_file_extent_inline_start(fi), copy_size);
	kunmap_local(kaddr);
	if (copy_size < PAGE_SIZE)
		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
	SetPageUptodate(page);
	return 0;
}

/*
 * Lookup the first extent overlapping a range in a file.
 *
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * Return the first &struct extent_map which overlaps the given range, reading
 * it from the B-tree and caching it if necessary. Note that there may be more
 * extents which overlap the given range after the returned extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		/*
		 * Inline extent can only exist at file offset 0. This is
		 * ensured by tree-checker and inline extent creation path.
		 * Thus all members representing file offsets should be zero.
		 */
		ASSERT(pg_offset == 0);
		ASSERT(extent_start == 0);
		ASSERT(em->start == 0);

		/*
		 * btrfs_extent_item_to_extent_map() should have properly
		 * initialized em members already.
		 *
		 * Other members are not utilized for inline extents.
		 */
		ASSERT(em->block_start == EXTENT_MAP_INLINE);
		ASSERT(em->len == fs_info->sectorsize);

		ret = read_inline_extent(inode, path, page);
		if (ret < 0)
			goto out;
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
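
/*
 * Create the extent map and ordered extent backing a direct IO write. For
 * NOCOW writes only the ordered extent is created. The resulting ordered
 * extent is stashed in dio_data->ordered so that a later partial write can
 * split or cancel it.
 */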
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	struct btrfs_ordered_extent *ordered;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
					     block_start, block_len, 0,
					     (1 << type) |
					     (1 << BTRFS_ORDERED_DIRECT),
					     BTRFS_COMPRESS_NONE);
	if (IS_ERR(ordered)) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_map_range(inode, start,
						    start + len - 1, false);
		}
		em = ERR_CAST(ordered);
	} else {
		ASSERT(!dio_data->ordered);
		dio_data->ordered = ordered;
	}
out:

	return em;
}
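
/*
 * Allocate a new data extent for a direct IO write that has to COW, creating
 * the matching extent map and ordered extent for it.
 */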
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  struct btrfs_dio_data *dio_data,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}

static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_len:	(optional) Return the original on-disk length of the file extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool nowait, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (ram_bytes)
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (orig_start)
		*orig_start = key.offset - nocow_args.extent_offset;
	if (orig_block_len)
		*orig_block_len = nocow_args.disk_num_bytes;

	*len = nocow_args.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
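
/*
 * Lock the io_tree range we are going to do direct IO against, looping until
 * the range has neither an ordered extent nor a relevant buffered page left.
 * In NOWAIT mode -EAGAIN is returned instead of blocking, while -ENOTBLK asks
 * the caller to fall back to buffered IO.
 */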
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state,
			      unsigned int iomap_flags)
{
	const bool writing = (iomap_flags & IOMAP_WRITE);
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend,
					     cached_state))
				return -EAGAIN;
		} else {
			lock_extent(io_tree, lockstart, lockend, cached_state);
		}
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent. The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent(io_tree, lockstart, lockend, cached_state);

		if (ordered) {
			if (nowait) {
				btrfs_put_ordered_extent(ordered);
				ret = -EAGAIN;
				break;
			}
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered);
			else
				ret = nowait ? -EAGAIN : -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readahead wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = nowait ? -EAGAIN : -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}

/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	ret = btrfs_replace_extent_map_range(inode, em, true);
	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers need to do free_extent_map once. */
	return em;
}
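
/*
 * Do the extent work for a direct IO write: either reuse an existing
 * NOCOW/prealloc extent or allocate a new one, reserving the needed metadata
 * (and, for COW, data) space and creating the ordered extent. On return *lenp
 * is trimmed to the length actually covered.
 */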
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 *lenp,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 len = *lenp;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em = em2;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our reservation
	 * for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	*lenp = len;
	return ret;
}
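
/*
 * Our ->iomap_begin() callback: lock the file range, look up (or create, for
 * writes) the extent covering it and translate the resulting extent map into
 * a struct iomap for the generic iomap direct IO machinery.
 */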
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = iter->private;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	const u64 data_alloc_len = length;
	bool unlock_extents = false;

	/*
	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
	 * we're NOWAIT we may submit a bio for a partial range and return
	 * EIOCBQUEUED, which would result in an errant short read.
	 *
	 * The best way to handle this would be to allow for partial completions
	 * of iocb's, so we could submit the partial bio, return and fault in
	 * the rest of the pages, and then submit the io for the rest of the
	 * range. However we don't have that currently, so simply return
	 * -EAGAIN at this point so that the normal path is used.
	 */
	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
		return -EAGAIN;

	/*
	 * Cap the size of reads to that usually seen in buffered I/O as we need
	 * to allocate a contiguous array for the checksums.
	 */
	if (!write)
		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
	 * enough if we've written compressed pages to this area, so we need to
	 * flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk - the first flush only starts
	 * compression on the data, while keeping the pages locked, so by the
	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages no longer under writeback.
	 *
	 * If we have a NOWAIT request and we have any pages in the range that
	 * are locked, likely due to compression still in progress, we don't want
	 * to block on page locks. We also don't want to block on pages marked as
	 * dirty or under writeback (same as for the non-compression case).
	 * iomap_dio_rw() did the same check, but after that and before we got
	 * here, mmap'ed writes may have happened or buffered reads started
	 * (readpage() and readahead(), which lock pages), as we haven't locked
	 * the file range yet.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		if (flags & IOMAP_NOWAIT) {
			if (filemap_range_needs_writeback(inode->i_mapping,
							  lockstart, lockend))
				return -EAGAIN;
		} else {
			ret = filemap_fdatawrite_range(inode->i_mapping, start,
						       start + length - 1);
			if (ret)
				return ret;
		}
	}

	memset(dio_data, 0, sizeof(*dio_data));

	/*
	 * We always try to allocate data space and must do it before locking
	 * the file range, to avoid deadlocks with concurrent writes to the same
	 * range if the range has several extents and the writes don't expand the
	 * current i_size (the inode lock is taken in shared mode). If we fail to
	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we can not do a
	 * NOCOW write.
	 */
	if (write && !(flags & IOMAP_NOWAIT)) {
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &dio_data->data_reserved,
						  start, data_alloc_len, false);
		if (!ret)
			dio_data->data_space_reserved = true;
		else if (ret && !(BTRFS_I(inode)->flags &
				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
			goto err;
	}

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered IO, or we are doing a
	 * NOWAIT read/write and we need to block.
	 */
	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
	if (ret < 0)
		goto err;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io. INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO. Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		/*
		 * If we are in a NOWAIT context, return -EAGAIN in order to
		 * fallback to buffered IO. This is not only because we can
		 * block with buffered IO (no support for NOWAIT semantics at
		 * the moment) but also to avoid returning short reads to user
		 * space - this happens if we were able to read some data from
		 * previous non-compressed extents and then when we fallback to
		 * buffered IO, at btrfs_file_read_iter() by calling
		 * filemap_read(), we fail to fault in pages for the read buffer,
		 * in which case filemap_read() returns a short read (the number
		 * of bytes previously read is > 0, so it does not return -EFAULT).
		 */
		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));

	/*
	 * If we have a NOWAIT request and the range contains multiple extents
	 * (or a mix of extents and holes), then we return -EAGAIN to make the
	 * caller fallback to a context where it can do a blocking (without
	 * NOWAIT) request. This way we avoid doing partial IO and returning
	 * success to the caller, which is not optimal for writes and for reads
	 * it can result in unexpected behaviour for an application.
	 *
	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data then what the caller
	 * asked for, resulting in an unexpected, and incorrect, short read.
	 * That is, the caller asked to read N bytes and we return less than that,
	 * which is wrong unless we are crossing EOF. This happens if we get a
	 * page fault error when trying to fault in pages for the buffer that is
	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
	 * have previously submitted bios for other extents in the range, in
	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
	 * those bios have completed by the time we get the page fault error,
	 * which we return back to our caller - we should only return EIOCBQUEUED
	 * after we have submitted bios for all the extents in the range.
	 */
	if ((flags & IOMAP_NOWAIT) && len < length) {
		free_extent_map(em);
		ret = -EAGAIN;
		goto unlock_err;
	}

	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, &len, flags);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
		if (dio_data->data_space_reserved) {
			u64 release_offset;
			u64 release_len = 0;

			if (dio_data->nocow_done) {
				release_offset = start;
				release_len = data_alloc_len;
			} else if (len < data_alloc_len) {
				release_offset = start + len;
				release_len = data_alloc_len - len;
			}

			if (release_len > 0)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       dio_data->data_reserved,
							       release_offset,
							       release_len);
		}
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			      &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though iomap code does
	 * that, since we have locked only the parts we are performing I/O in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
	iomap->length = len;
	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
		      &cached_state);
err:
	if (dio_data->data_space_reserved) {
		btrfs_free_reserved_data_space(BTRFS_I(inode),
					       dio_data->data_reserved,
					       start, data_alloc_len);
		extent_changeset_free(dio_data->data_reserved);
	}

	return ret;
}
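
/*
 * Our ->iomap_end() callback: undo what ->iomap_begin() set up for any part
 * of the range that did not get a bio submitted, by finishing the ordered
 * extent (writes) or unlocking the range (reads).
 */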
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			       ssize_t written, unsigned int flags,
			       struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
			      NULL);
		return 0;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    pos, length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1, NULL);
		ret = -ENOTBLK;
	}
	if (write) {
		btrfs_put_ordered_extent(dio_data->ordered);
		dio_data->ordered = NULL;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}
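
/*
 * Bio completion for direct IO: finish the ordered extent for writes or
 * unlock the io_tree range for reads, then hand the bio back to iomap.
 */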
static void btrfs_dio_end_io(struct btrfs_bio *bbio)
{
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_inode *inode = bbio->inode;
	struct bio *bio = &bbio->bio;

	if (bio->bi_status) {
		btrfs_warn(inode->root->fs_info,
		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
			   btrfs_ino(inode), bio->bi_opf,
			   dip->file_offset, dip->bytes, bio->bi_status);
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		btrfs_finish_ordered_extent(bbio->ordered, NULL,
					    dip->file_offset, dip->bytes,
					    !bio->bi_status);
	} else {
		unlock_extent(&inode->io_tree, dip->file_offset,
			      dip->file_offset + dip->bytes - 1, NULL);
	}

	bbio->bio.bi_private = bbio->private;
	iomap_dio_bio_end_io(bio);
}
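
/*
 * Per-bio submission hook called by iomap. Besides initializing the
 * btrfs_bio, it tracks how many bytes were submitted so that iomap_end can
 * detect partial writes.
 */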
static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
				loff_t file_offset)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_dio_data *dio_data = iter->private;

	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
		       btrfs_dio_end_io, bio->bi_private);
	bbio->inode = BTRFS_I(iter->inode);
	bbio->file_offset = file_offset;

	dip->file_offset = file_offset;
	dip->bytes = bio->bi_iter.bi_size;

	dio_data->submitted += bio->bi_iter.bi_size;

	/*
	 * Check if we are doing a partial write. If we are, we need to split
	 * the ordered extent to match the submitted bio. Hang on to the
	 * remaining unfinishable ordered_extent in dio_data so that it can be
	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
	 * remaining pages is blocked on the outstanding ordered extent.
	 */
	if (iter->flags & IOMAP_WRITE) {
		int ret;

		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
		if (ret) {
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    file_offset, dip->bytes,
						    !ret);
			bio->bi_status = errno_to_blk_status(ret);
			iomap_dio_bio_end_io(bio);
			return;
		}
	}

	btrfs_submit_bio(bbio, 0);
}

static const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin = btrfs_dio_iomap_begin,
	.iomap_end = btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io = btrfs_dio_submit_io,
	.bio_set = &btrfs_dio_bioset,
};

ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			    IOMAP_DIO_PARTIAL, &data, done_before);
}

struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			      IOMAP_DIO_PARTIAL, &data, done_before);
}
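
/*
 * FIEMAP implementation: when FIEMAP_FLAG_SYNC is given we also wait for all
 * ordered extents (see the comment inside about compressed writeback), then
 * defer to extent_fiemap().
 */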
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/*
	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
	 * file range (0 to LLONG_MAX), but that is not enough if we have
	 * compression enabled. The first filemap_fdatawrite_range() only kicks
	 * in the compression of data (in an async thread) and will return
	 * before the compression is done and writeback is started. A second
	 * filemap_fdatawrite_range() is needed to wait for the compression to
	 * complete and writeback to start. We also need to wait for ordered
	 * extents to complete, because our fiemap implementation uses mainly
	 * file extent items to list the extents, searching for extent maps
	 * only for file ranges with holes or prealloc extents to figure out
	 * if we have delalloc in those ranges.
	 */
	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
		if (ret)
			return ret;
	}

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}

static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}

static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}

/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause use-after-free
 * for subpage spinlock. So this function is used to spin and wait for subpage
 * spinlock.
 */
static void wait_subpage_spinlock(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything. But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor writeback, and we have page
	 * locked, the only possible way to hold a spinlock is from the endio
	 * function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}

static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(&folio->page, gfp_flags);

	if (ret == 1) {
		wait_subpage_spinlock(&folio->page);
		clear_page_extent_mapped(&folio->page);
	}
	return ret;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}

#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_ordered(src)) {
		folio_clear_ordered(src);
		folio_set_ordered(dst);
	}

	return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif

static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have folio locked so no new ordered extent can be created on this
	 * page, nor bio can be submitted for this folio.
	 *
	 * But already submitted bio can still be finished on this folio.
	 * Furthermore, endio function won't skip folio which has Ordered
	 * (Private2) already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(&folio->page);

	/*
	 * For subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which passes range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other part of the range.
	 *
	 * For cases that invalidate the full folio even the range doesn't
	 * cover the full folio, like invalidating the last folio, we're
	 * still safe to wait for ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		u64 range_end;
		u32 range_len;
		u32 extra_flags = 0;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
			/*
			 * If Ordered (Private2) is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			goto next;
		}
		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, &cached_state);

		spin_lock_irq(&inode->ordered_tree.lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree.lock);

		/*
		 * If the ordered extent has finished, we're safe to delete all
		 * the extent states of the range, otherwise
		 * btrfs_finish_ordered_io() will get executed by endio for
		 * other pages, so we can't delete extent states.
		 */
		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
					 extra_flags, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered (Private2) anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_page_extent_mapped(&folio->page);
}

/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate_setsize() writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepages() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents. Drop our locks and wait for them to finish.
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
			PAGE_SIZE);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is firstly dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space account reason, we still need to
	 * clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					 &cached_state);
	if (ret2) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE)
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);

	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
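
/*
 * Shrinking truncate of the inode's items, restarting the transaction as
 * needed. A temporary block reservation, refilled from the transaction
 * reservation on every loop iteration, backs the tree modifications (the
 * long comment inside explains why two reservations are required).
 */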
8245 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
8247 struct btrfs_truncate_control control = {
8249 .ino = btrfs_ino(inode),
8250 .min_type = BTRFS_EXTENT_DATA_KEY,
8251 .clear_extent_range = true,
8253 struct btrfs_root *root = inode->root;
8254 struct btrfs_fs_info *fs_info = root->fs_info;
8255 struct btrfs_block_rsv *rsv;
8257 struct btrfs_trans_handle *trans;
8258 u64 mask = fs_info->sectorsize - 1;
8259 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8261 if (!skip_writeback) {
8262 ret = btrfs_wait_ordered_range(&inode->vfs_inode,
8263 inode->vfs_inode.i_size & (~mask),
8270 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
8271 * things going on here:
8273 * 1) We need to reserve space to update our inode.
8275 * 2) We need to have something to cache all the space that is going to
8276 * be free'd up by the truncate operation, but also have some slack
8277 * space reserved in case it uses space during the truncate (thank you
8278 * very much snapshotting).
8280 * And we need these to be separate. The fact is we can use a lot of
8281 * space doing the truncate, and we have no earthly idea how much space
8282 * we will use, so we need the truncate reservation to be separate so it
8283 * doesn't end up using space reserved for updating the inode. We also
8284 * need to be able to stop the transaction and start a new one, which
8285 * means we need to be able to update the inode several times, and we
8286 * have no idea of knowing how many times that will be, so we can't just
8287 * reserve 1 item for the entirety of the operation, so that has to be
8288 * done separately as well.
8290 * So that leaves us with
8292 * 1) rsv - for the truncate reservation, which we will steal from the
8293 * transaction reservation.
8294 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8295 * updating the inode.
8297 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8300 rsv->size = min_size;
8301 rsv->failfast = true;
8304 * 1 for the truncate slack space
8305 * 1 for updating the inode.
8307 trans = btrfs_start_transaction(root, 2);
8308 if (IS_ERR(trans)) {
8309 ret = PTR_ERR(trans);
8313 /* Migrate the slack space for the truncate to our reserve */
8314 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8317 * We have reserved 2 metadata units when we started the transaction and
8318 * min_size matches 1 unit, so this should never fail, but if it does,
8319 * it's not critical we just fail truncation.
8322 btrfs_end_transaction(trans);
8326 trans->block_rsv = rsv;
8329 struct extent_state *cached_state = NULL;
8330 const u64 new_size = inode->vfs_inode.i_size;
8331 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8333 control.new_size = new_size;
8334 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8336 * We want to drop from the next block forward in case this new
8337 * size is not block aligned since we will be keeping the last
8338 * block of the extent just the way it is.
8340 btrfs_drop_extent_map_range(inode,
8341 ALIGN(new_size, fs_info->sectorsize),
8344 ret = btrfs_truncate_inode_items(trans, root, &control);
8346 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
8347 btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
8349 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8351 trans->block_rsv = &fs_info->trans_block_rsv;
8352 if (ret != -ENOSPC && ret != -EAGAIN)
8355 ret = btrfs_update_inode(trans, root, inode);
8359 btrfs_end_transaction(trans);
8360 btrfs_btree_balance_dirty(fs_info);
8362 trans = btrfs_start_transaction(root, 2);
8363 if (IS_ERR(trans)) {
8364 ret = PTR_ERR(trans);
8369 btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8370 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8371 rsv, min_size, false);
8373 * We have reserved 2 metadata units when we started the
8374 * transaction and min_size matches 1 unit, so this should never
8375 * fail, but if it does, it's not critical; we just fail the truncation.
8380 trans->block_rsv = rsv;
8384 * We can't call btrfs_truncate_block() inside a trans handle as we could
8385 * deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then we
8386 * know we've truncated everything except the last little bit, and can
8387 * do btrfs_truncate_block() and then update the disk_i_size.
8389 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8390 btrfs_end_transaction(trans);
8391 btrfs_btree_balance_dirty(fs_info);
8393 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
8396 trans = btrfs_start_transaction(root, 1);
8397 if (IS_ERR(trans)) {
8398 ret = PTR_ERR(trans);
8401 btrfs_inode_safe_disk_i_size_write(inode, 0);
8407 trans->block_rsv = &fs_info->trans_block_rsv;
8408 ret2 = btrfs_update_inode(trans, root, inode);
8412 ret2 = btrfs_end_transaction(trans);
8415 btrfs_btree_balance_dirty(fs_info);
8418 btrfs_free_block_rsv(fs_info, rsv);
8420 * So if we truncate and then write and fsync we normally would just
8421 * write the extents that changed, which is a problem if we need to
8422 * first truncate that entire inode. So set this flag so we write out
8423 * all of the extents in the inode to the sync log so we're completely safe.
8426 * If no extents were dropped or trimmed we don't need to force the next
8427 * fsync to truncate all the inode's items from the log and re-log them
8428 * all. This means the truncate operation did not change the file size,
8429 * or changed it to a smaller size but there was only an implicit hole
8430 * between the old i_size and the new i_size, and there were no prealloc
8431 * extents beyond i_size to drop.
8433 if (control.extents_found > 0)
8434 btrfs_set_inode_full_sync(inode);
8439 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
8442 struct inode *inode;
8444 inode = new_inode(dir->i_sb);
8447 * Subvolumes don't inherit the sgid bit or the parent's gid if
8448 * the parent's sgid bit is set. This is probably a bug.
8450 inode_init_owner(idmap, inode, NULL,
8451 S_IFDIR | (~current_umask() & S_IRWXUGO));
8452 inode->i_op = &btrfs_dir_inode_operations;
8453 inode->i_fop = &btrfs_dir_file_operations;
8458 struct inode *btrfs_alloc_inode(struct super_block *sb)
8460 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8461 struct btrfs_inode *ei;
8462 struct inode *inode;
8464 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8471 ei->last_sub_trans = 0;
8472 ei->logged_trans = 0;
8473 ei->delalloc_bytes = 0;
8474 ei->new_delalloc_bytes = 0;
8475 ei->defrag_bytes = 0;
8476 ei->disk_i_size = 0;
8480 ei->index_cnt = (u64)-1;
8482 ei->last_unlink_trans = 0;
8483 ei->last_reflink_trans = 0;
8484 ei->last_log_commit = 0;
8486 spin_lock_init(&ei->lock);
8487 ei->outstanding_extents = 0;
8488 if (sb->s_magic != BTRFS_TEST_MAGIC)
8489 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8490 BTRFS_BLOCK_RSV_DELALLOC);
8491 ei->runtime_flags = 0;
8492 ei->prop_compress = BTRFS_COMPRESS_NONE;
8493 ei->defrag_compress = BTRFS_COMPRESS_NONE;
8495 ei->delayed_node = NULL;
8497 ei->i_otime.tv_sec = 0;
8498 ei->i_otime.tv_nsec = 0;
8500 inode = &ei->vfs_inode;
8501 extent_map_tree_init(&ei->extent_tree);
8502 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8503 ei->io_tree.inode = ei;
8504 extent_io_tree_init(fs_info, &ei->file_extent_tree,
8505 IO_TREE_INODE_FILE_EXTENT);
8506 mutex_init(&ei->log_mutex);
8507 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8508 INIT_LIST_HEAD(&ei->delalloc_inodes);
8509 INIT_LIST_HEAD(&ei->delayed_iput);
8510 RB_CLEAR_NODE(&ei->rb_node);
8511 init_rwsem(&ei->i_mmap_lock);
8516 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8517 void btrfs_test_destroy_inode(struct inode *inode)
8519 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8520 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8524 void btrfs_free_inode(struct inode *inode)
8526 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8529 void btrfs_destroy_inode(struct inode *vfs_inode)
8531 struct btrfs_ordered_extent *ordered;
8532 struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8533 struct btrfs_root *root = inode->root;
8534 bool freespace_inode;
8536 WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8537 WARN_ON(vfs_inode->i_data.nrpages);
8538 WARN_ON(inode->block_rsv.reserved);
8539 WARN_ON(inode->block_rsv.size);
8540 WARN_ON(inode->outstanding_extents);
8541 if (!S_ISDIR(vfs_inode->i_mode)) {
8542 WARN_ON(inode->delalloc_bytes);
8543 WARN_ON(inode->new_delalloc_bytes);
8545 WARN_ON(inode->csum_bytes);
8546 WARN_ON(inode->defrag_bytes);
8549 * This can happen where we create an inode, but somebody else also
8550 * created the same inode and we need to destroy the one we already created.
8557 * If this is a free space inode, do not take the ordered extents lockdep map.
8560 freespace_inode = btrfs_is_free_space_inode(inode);
8563 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8567 btrfs_err(root->fs_info,
8568 "found ordered extent %llu %llu on inode cleanup",
8569 ordered->file_offset, ordered->num_bytes);
8571 if (!freespace_inode)
8572 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8574 btrfs_remove_ordered_extent(inode, ordered);
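/*
 * Note (annotation): the extent is put twice on purpose — once for the
 * reference taken by the lookup above, and once for the base reference
 * normally dropped at completion, since this ordered extent will never
 * complete the usual way.
 */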
8575 btrfs_put_ordered_extent(ordered);
8576 btrfs_put_ordered_extent(ordered);
8579 btrfs_qgroup_check_reserved_leak(inode);
8580 inode_tree_del(inode);
8581 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8582 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8583 btrfs_put_root(inode->root);
8586 int btrfs_drop_inode(struct inode *inode)
8588 struct btrfs_root *root = BTRFS_I(inode)->root;
8593 /* The snapshot/subvolume tree is being deleted. */
8594 if (btrfs_root_refs(&root->root_item) == 0)
8597 return generic_drop_inode(inode);
8600 static void init_once(void *foo)
8602 struct btrfs_inode *ei = foo;
8604 inode_init_once(&ei->vfs_inode);
8607 void __cold btrfs_destroy_cachep(void)
8610 * Make sure all delayed RCU-freed inodes are flushed before we
8611 * destroy the cache.
8613 rcu_barrier();
8614 bioset_exit(&btrfs_dio_bioset);
8615 kmem_cache_destroy(btrfs_inode_cachep);
8618 int __init btrfs_init_cachep(void)
8620 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8621 sizeof(struct btrfs_inode), 0,
8622 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8624 if (!btrfs_inode_cachep)
8627 if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8628 offsetof(struct btrfs_dio_private, bbio.bio),
8634 btrfs_destroy_cachep();
8638 static int btrfs_getattr(struct mnt_idmap *idmap,
8639 const struct path *path, struct kstat *stat,
8640 u32 request_mask, unsigned int flags)
8644 struct inode *inode = d_inode(path->dentry);
8645 u32 blocksize = inode->i_sb->s_blocksize;
8646 u32 bi_flags = BTRFS_I(inode)->flags;
8647 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8649 stat->result_mask |= STATX_BTIME;
8650 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
8651 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
8652 if (bi_flags & BTRFS_INODE_APPEND)
8653 stat->attributes |= STATX_ATTR_APPEND;
8654 if (bi_flags & BTRFS_INODE_COMPRESS)
8655 stat->attributes |= STATX_ATTR_COMPRESSED;
8656 if (bi_flags & BTRFS_INODE_IMMUTABLE)
8657 stat->attributes |= STATX_ATTR_IMMUTABLE;
8658 if (bi_flags & BTRFS_INODE_NODUMP)
8659 stat->attributes |= STATX_ATTR_NODUMP;
8660 if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8661 stat->attributes |= STATX_ATTR_VERITY;
8663 stat->attributes_mask |= (STATX_ATTR_APPEND |
8664 STATX_ATTR_COMPRESSED |
8665 STATX_ATTR_IMMUTABLE |
8668 generic_fillattr(idmap, inode, stat);
8669 stat->dev = BTRFS_I(inode)->root->anon_dev;
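/*
 * Note (annotation): new_delalloc_bytes counts buffered writes that do
 * not yet have extent items on disk, so adding it below makes the
 * reported block count reflect data a reader would see, not just what
 * has already been flushed.
 */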
8671 spin_lock(&BTRFS_I(inode)->lock);
8672 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8673 inode_bytes = inode_get_bytes(inode);
8674 spin_unlock(&BTRFS_I(inode)->lock);
8675 stat->blocks = (ALIGN(inode_bytes, blocksize) +
8676 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8680 static int btrfs_rename_exchange(struct inode *old_dir,
8681 struct dentry *old_dentry,
8682 struct inode *new_dir,
8683 struct dentry *new_dentry)
8685 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
8686 struct btrfs_trans_handle *trans;
8687 unsigned int trans_num_items;
8688 struct btrfs_root *root = BTRFS_I(old_dir)->root;
8689 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8690 struct inode *new_inode = new_dentry->d_inode;
8691 struct inode *old_inode = old_dentry->d_inode;
8692 struct timespec64 ctime = current_time(old_inode);
8693 struct btrfs_rename_ctx old_rename_ctx;
8694 struct btrfs_rename_ctx new_rename_ctx;
8695 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8696 u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8701 bool need_abort = false;
8702 struct fscrypt_name old_fname, new_fname;
8703 struct fscrypt_str *old_name, *new_name;
8706 * For non-subvolumes allow exchange only within one subvolume, in the
8707 * same inode namespace. Two subvolumes (represented as directories) can
8708 * be exchanged as they're a logical link and have a fixed inode number.
8711 (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8712 new_ino != BTRFS_FIRST_FREE_OBJECTID))
8715 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8719 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8721 fscrypt_free_filename(&old_fname);
8725 old_name = &old_fname.disk_name;
8726 new_name = &new_fname.disk_name;
8728 /* Close the race window with snapshot create/destroy ioctl. */
8729 if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8730 new_ino == BTRFS_FIRST_FREE_OBJECTID)
8731 down_read(&fs_info->subvol_sem);
8734 * For each inode:
8735 * 1 to remove old dir item
8736 * 1 to remove old dir index
8737 * 1 to add new dir item
8738 * 1 to add new dir index
8739 * 1 to update parent inode
8741 * If the parents are the same, we only need to account for one
8743 trans_num_items = (old_dir == new_dir ? 9 : 10);
8744 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8746 * 1 to remove old root ref
8747 * 1 to remove old root backref
8748 * 1 to add new root ref
8749 * 1 to add new root backref
8751 trans_num_items += 4;
8754 * 1 to update inode item
8755 * 1 to remove old inode ref
8756 * 1 to add new inode ref
8758 trans_num_items += 3;
8760 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8761 trans_num_items += 4;
8763 trans_num_items += 3;
8764 trans = btrfs_start_transaction(root, trans_num_items);
8765 if (IS_ERR(trans)) {
8766 ret = PTR_ERR(trans);
8771 ret = btrfs_record_root_in_trans(trans, dest);
8777 * We need to find a free sequence number both in the source and
8778 * in the destination directory for the exchange.
8780 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8783 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8787 BTRFS_I(old_inode)->dir_index = 0ULL;
8788 BTRFS_I(new_inode)->dir_index = 0ULL;
8790 /* Reference for the source. */
8791 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8792 /* force full log commit if subvolume involved. */
8793 btrfs_set_log_full_commit(trans);
8795 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8796 btrfs_ino(BTRFS_I(new_dir)),
8803 /* And now for the dest. */
8804 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8805 /* force full log commit if subvolume involved. */
8806 btrfs_set_log_full_commit(trans);
8808 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8809 btrfs_ino(BTRFS_I(old_dir)),
8813 btrfs_abort_transaction(trans, ret);
8818 /* Update inode version and ctime/mtime. */
8819 inode_inc_iversion(old_dir);
8820 inode_inc_iversion(new_dir);
8821 inode_inc_iversion(old_inode);
8822 inode_inc_iversion(new_inode);
8823 old_dir->i_mtime = ctime;
8824 old_dir->i_ctime = ctime;
8825 new_dir->i_mtime = ctime;
8826 new_dir->i_ctime = ctime;
8827 old_inode->i_ctime = ctime;
8828 new_inode->i_ctime = ctime;
8830 if (old_dentry->d_parent != new_dentry->d_parent) {
8831 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8832 BTRFS_I(old_inode), true);
8833 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8834 BTRFS_I(new_inode), true);
8837 /* src is a subvolume */
8838 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8839 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8840 } else { /* src is an inode */
8841 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8842 BTRFS_I(old_dentry->d_inode),
8843 old_name, &old_rename_ctx);
8845 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
8848 btrfs_abort_transaction(trans, ret);
8852 /* dest is a subvolume */
8853 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8854 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8855 } else { /* dest is an inode */
8856 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8857 BTRFS_I(new_dentry->d_inode),
8858 new_name, &new_rename_ctx);
8860 ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
8863 btrfs_abort_transaction(trans, ret);
8867 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8868 new_name, 0, old_idx);
8870 btrfs_abort_transaction(trans, ret);
8874 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8875 old_name, 0, new_idx);
8877 btrfs_abort_transaction(trans, ret);
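/*
 * Note (annotation): the new directory index is cached below only for
 * inodes with a single link; with multiple hard links a single cached
 * dir_index would be ambiguous.
 */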
8881 if (old_inode->i_nlink == 1)
8882 BTRFS_I(old_inode)->dir_index = old_idx;
8883 if (new_inode->i_nlink == 1)
8884 BTRFS_I(new_inode)->dir_index = new_idx;
8887 * Now pin the logs of the roots. We do it to ensure that no other task
8888 * can sync the logs while we are in progress with the rename, because
8889 * that could result in an inconsistency in case any of the inodes that
8890 * are part of this rename operation were logged before.
8892 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8893 btrfs_pin_log_trans(root);
8894 if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8895 btrfs_pin_log_trans(dest);
8897 /* Do the log updates for all inodes. */
8898 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8899 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8900 old_rename_ctx.index, new_dentry->d_parent);
8901 if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8902 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8903 new_rename_ctx.index, old_dentry->d_parent);
8905 /* Now unpin the logs. */
8906 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8907 btrfs_end_log_trans(root);
8908 if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8909 btrfs_end_log_trans(dest);
8911 ret2 = btrfs_end_transaction(trans);
8912 ret = ret ? ret : ret2;
8914 if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8915 old_ino == BTRFS_FIRST_FREE_OBJECTID)
8916 up_read(&fs_info->subvol_sem);
8918 fscrypt_free_filename(&new_fname);
8919 fscrypt_free_filename(&old_fname);
8923 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8926 struct inode *inode;
8928 inode = new_inode(dir->i_sb);
8930 inode_init_owner(idmap, inode, dir,
8931 S_IFCHR | WHITEOUT_MODE);
8932 inode->i_op = &btrfs_special_inode_operations;
8933 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8938 static int btrfs_rename(struct mnt_idmap *idmap,
8939 struct inode *old_dir, struct dentry *old_dentry,
8940 struct inode *new_dir, struct dentry *new_dentry,
8943 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
8944 struct btrfs_new_inode_args whiteout_args = {
8946 .dentry = old_dentry,
8948 struct btrfs_trans_handle *trans;
8949 unsigned int trans_num_items;
8950 struct btrfs_root *root = BTRFS_I(old_dir)->root;
8951 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8952 struct inode *new_inode = d_inode(new_dentry);
8953 struct inode *old_inode = d_inode(old_dentry);
8954 struct btrfs_rename_ctx rename_ctx;
8958 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8959 struct fscrypt_name old_fname, new_fname;
8961 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8964 /* We only allow renaming a subvolume link between subvolumes. */
8965 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8968 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8969 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
8972 if (S_ISDIR(old_inode->i_mode) && new_inode &&
8973 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8976 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8980 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8982 fscrypt_free_filename(&old_fname);
8986 /* check for collisions, even if the name isn't there */
8987 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
8989 if (ret == -EEXIST) {
8991 /* We shouldn't get -EEXIST without a new_inode. */
8992 if (WARN_ON(!new_inode)) {
8993 goto out_fscrypt_names;
8996 /* maybe -EOVERFLOW */
8997 goto out_fscrypt_names;
9003 * we're using rename to replace one file with another. Start IO on it
9004 * now so we don't add too much work to the end of the transaction
9006 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9007 filemap_flush(old_inode->i_mapping);
9009 if (flags & RENAME_WHITEOUT) {
9010 whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
9011 if (!whiteout_args.inode) {
9013 goto out_fscrypt_names;
9015 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9017 goto out_whiteout_inode;
9019 /* 1 to update the old parent inode. */
9020 trans_num_items = 1;
9023 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9024 /* Close the race window with snapshot create/destroy ioctl */
9025 down_read(&fs_info->subvol_sem);
9027 * 1 to remove old root ref
9028 * 1 to remove old root backref
9029 * 1 to add new root ref
9030 * 1 to add new root backref
9032 trans_num_items += 4;
9035 * 1 to update inode
9036 * 1 to remove old inode ref
9037 * 1 to add new inode ref
9039 trans_num_items += 3;
9042 * 1 to remove old dir item
9043 * 1 to remove old dir index
9044 * 1 to add new dir item
9045 * 1 to add new dir index
9047 trans_num_items += 4;
9048 /* 1 to update new parent inode if it's not the same as the old parent */
9049 if (new_dir != old_dir)
9053 * 1 to update inode
9054 * 1 to remove inode ref
9055 * 1 to remove dir item
9056 * 1 to remove dir index
9057 * 1 to possibly add orphan item
9059 trans_num_items += 5;
9061 trans = btrfs_start_transaction(root, trans_num_items);
9062 if (IS_ERR(trans)) {
9063 ret = PTR_ERR(trans);
9068 ret = btrfs_record_root_in_trans(trans, dest);
9073 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9077 BTRFS_I(old_inode)->dir_index = 0ULL;
9078 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9079 /* force full log commit if subvolume involved. */
9080 btrfs_set_log_full_commit(trans);
9082 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
9083 old_ino, btrfs_ino(BTRFS_I(new_dir)),
9089 inode_inc_iversion(old_dir);
9090 inode_inc_iversion(new_dir);
9091 inode_inc_iversion(old_inode);
9092 old_dir->i_mtime = current_time(old_dir);
9093 old_dir->i_ctime = old_dir->i_mtime;
9094 new_dir->i_mtime = old_dir->i_mtime;
9095 new_dir->i_ctime = old_dir->i_mtime;
9096 old_inode->i_ctime = old_dir->i_mtime;
9098 if (old_dentry->d_parent != new_dentry->d_parent)
9099 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9100 BTRFS_I(old_inode), true);
9102 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9103 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
9105 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9106 BTRFS_I(d_inode(old_dentry)),
9107 &old_fname.disk_name, &rename_ctx);
9109 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9112 btrfs_abort_transaction(trans, ret);
9117 inode_inc_iversion(new_inode);
9118 new_inode->i_ctime = current_time(new_inode);
9119 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9120 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9121 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
9122 BUG_ON(new_inode->i_nlink == 0);
9124 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9125 BTRFS_I(d_inode(new_dentry)),
9126 &new_fname.disk_name);
9128 if (!ret && new_inode->i_nlink == 0)
9129 ret = btrfs_orphan_add(trans,
9130 BTRFS_I(d_inode(new_dentry)));
9132 btrfs_abort_transaction(trans, ret);
9137 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9138 &new_fname.disk_name, 0, index);
9140 btrfs_abort_transaction(trans, ret);
9144 if (old_inode->i_nlink == 1)
9145 BTRFS_I(old_inode)->dir_index = index;
9147 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9148 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9149 rename_ctx.index, new_dentry->d_parent);
9151 if (flags & RENAME_WHITEOUT) {
9152 ret = btrfs_create_new_inode(trans, &whiteout_args);
9154 btrfs_abort_transaction(trans, ret);
9157 unlock_new_inode(whiteout_args.inode);
9158 iput(whiteout_args.inode);
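/*
 * Clear the pointer so the iput() on the error path below does not
 * drop the reference a second time.
 */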
9159 whiteout_args.inode = NULL;
9163 ret2 = btrfs_end_transaction(trans);
9164 ret = ret ? ret : ret2;
9166 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9167 up_read(&fs_info->subvol_sem);
9168 if (flags & RENAME_WHITEOUT)
9169 btrfs_new_inode_args_destroy(&whiteout_args);
9171 if (flags & RENAME_WHITEOUT)
9172 iput(whiteout_args.inode);
9174 fscrypt_free_filename(&old_fname);
9175 fscrypt_free_filename(&new_fname);
9179 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
9180 struct dentry *old_dentry, struct inode *new_dir,
9181 struct dentry *new_dentry, unsigned int flags)
9185 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9188 if (flags & RENAME_EXCHANGE)
9189 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9192 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
9195 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
9200 struct btrfs_delalloc_work {
9201 struct inode *inode;
9202 struct completion completion;
9203 struct list_head list;
9204 struct btrfs_work work;
9207 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9209 struct btrfs_delalloc_work *delalloc_work;
9210 struct inode *inode;
9212 delalloc_work = container_of(work, struct btrfs_delalloc_work,
9214 inode = delalloc_work->inode;
9215 filemap_flush(inode->i_mapping);
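/*
 * Note (annotation): the first flush may only have kicked off async
 * compression, which can redirty pages; if this inode has async
 * extents, flush once more so those pages are written out too.
 */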
9216 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9217 &BTRFS_I(inode)->runtime_flags))
9218 filemap_flush(inode->i_mapping);
9221 complete(&delalloc_work->completion);
9224 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9226 struct btrfs_delalloc_work *work;
9228 work = kmalloc(sizeof(*work), GFP_NOFS);
9232 init_completion(&work->completion);
9233 INIT_LIST_HEAD(&work->list);
9234 work->inode = inode;
9235 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
9241 * Some fairly slow code that needs optimization. This walks the list
9242 * of all the inodes with pending delalloc and forces them to disk.
9244 static int start_delalloc_inodes(struct btrfs_root *root,
9245 struct writeback_control *wbc, bool snapshot,
9246 bool in_reclaim_context)
9248 struct btrfs_inode *binode;
9249 struct inode *inode;
9250 struct btrfs_delalloc_work *work, *next;
9251 struct list_head works;
9252 struct list_head splice;
9254 bool full_flush = wbc->nr_to_write == LONG_MAX;
9256 INIT_LIST_HEAD(&works);
9257 INIT_LIST_HEAD(&splice);
9259 mutex_lock(&root->delalloc_mutex);
9260 spin_lock(&root->delalloc_lock);
9261 list_splice_init(&root->delalloc_inodes, &splice);
9262 while (!list_empty(&splice)) {
9263 binode = list_entry(splice.next, struct btrfs_inode,
9266 list_move_tail(&binode->delalloc_inodes,
9267 &root->delalloc_inodes);
9269 if (in_reclaim_context &&
9270 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9273 inode = igrab(&binode->vfs_inode);
9275 cond_resched_lock(&root->delalloc_lock);
9278 spin_unlock(&root->delalloc_lock);
9281 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9282 &binode->runtime_flags);
9284 work = btrfs_alloc_delalloc_work(inode);
9290 list_add_tail(&work->list, &works);
9291 btrfs_queue_work(root->fs_info->flush_workers,
9294 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9295 btrfs_add_delayed_iput(BTRFS_I(inode));
9296 if (ret || wbc->nr_to_write <= 0)
9300 spin_lock(&root->delalloc_lock);
9302 spin_unlock(&root->delalloc_lock);
9305 list_for_each_entry_safe(work, next, &works, list) {
9306 list_del_init(&work->list);
9307 wait_for_completion(&work->completion);
9311 if (!list_empty(&splice)) {
9312 spin_lock(&root->delalloc_lock);
9313 list_splice_tail(&splice, &root->delalloc_inodes);
9314 spin_unlock(&root->delalloc_lock);
9316 mutex_unlock(&root->delalloc_mutex);
9320 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9322 struct writeback_control wbc = {
9323 .nr_to_write = LONG_MAX,
9324 .sync_mode = WB_SYNC_NONE,
9326 .range_end = LLONG_MAX,
9328 struct btrfs_fs_info *fs_info = root->fs_info;
9330 if (BTRFS_FS_ERROR(fs_info))
9333 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9336 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9337 bool in_reclaim_context)
9339 struct writeback_control wbc = {
9341 .sync_mode = WB_SYNC_NONE,
9343 .range_end = LLONG_MAX,
9345 struct btrfs_root *root;
9346 struct list_head splice;
9349 if (BTRFS_FS_ERROR(fs_info))
9352 INIT_LIST_HEAD(&splice);
9354 mutex_lock(&fs_info->delalloc_root_mutex);
9355 spin_lock(&fs_info->delalloc_root_lock);
9356 list_splice_init(&fs_info->delalloc_roots, &splice);
9357 while (!list_empty(&splice)) {
9359 * Reset nr_to_write here so we know that we're doing a full flush.
9363 wbc.nr_to_write = LONG_MAX;
9365 root = list_first_entry(&splice, struct btrfs_root,
9367 root = btrfs_grab_root(root);
9369 list_move_tail(&root->delalloc_root,
9370 &fs_info->delalloc_roots);
9371 spin_unlock(&fs_info->delalloc_root_lock);
9373 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9374 btrfs_put_root(root);
9375 if (ret < 0 || wbc.nr_to_write <= 0)
9377 spin_lock(&fs_info->delalloc_root_lock);
9379 spin_unlock(&fs_info->delalloc_root_lock);
9383 if (!list_empty(&splice)) {
9384 spin_lock(&fs_info->delalloc_root_lock);
9385 list_splice_tail(&splice, &fs_info->delalloc_roots);
9386 spin_unlock(&fs_info->delalloc_root_lock);
9388 mutex_unlock(&fs_info->delalloc_root_mutex);
9392 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
9393 struct dentry *dentry, const char *symname)
9395 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9396 struct btrfs_trans_handle *trans;
9397 struct btrfs_root *root = BTRFS_I(dir)->root;
9398 struct btrfs_path *path;
9399 struct btrfs_key key;
9400 struct inode *inode;
9401 struct btrfs_new_inode_args new_inode_args = {
9405 unsigned int trans_num_items;
9410 struct btrfs_file_extent_item *ei;
9411 struct extent_buffer *leaf;
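/*
 * Note (annotation): the symlink target is stored as an inline file
 * extent (inserted below), so its length is bounded by the maximum
 * inline data size.
 */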
9413 name_len = strlen(symname);
9414 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9415 return -ENAMETOOLONG;
9417 inode = new_inode(dir->i_sb);
9420 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9421 inode->i_op = &btrfs_symlink_inode_operations;
9422 inode_nohighmem(inode);
9423 inode->i_mapping->a_ops = &btrfs_aops;
9424 btrfs_i_size_write(BTRFS_I(inode), name_len);
9425 inode_set_bytes(inode, name_len);
9427 new_inode_args.inode = inode;
9428 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9431 /* 1 additional item for the inline extent */
9434 trans = btrfs_start_transaction(root, trans_num_items);
9435 if (IS_ERR(trans)) {
9436 err = PTR_ERR(trans);
9437 goto out_new_inode_args;
9440 err = btrfs_create_new_inode(trans, &new_inode_args);
9444 path = btrfs_alloc_path();
9447 btrfs_abort_transaction(trans, err);
9448 discard_new_inode(inode);
9452 key.objectid = btrfs_ino(BTRFS_I(inode));
9454 key.type = BTRFS_EXTENT_DATA_KEY;
9455 datasize = btrfs_file_extent_calc_inline_size(name_len);
9456 err = btrfs_insert_empty_item(trans, root, path, &key,
9459 btrfs_abort_transaction(trans, err);
9460 btrfs_free_path(path);
9461 discard_new_inode(inode);
9465 leaf = path->nodes[0];
9466 ei = btrfs_item_ptr(leaf, path->slots[0],
9467 struct btrfs_file_extent_item);
9468 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9469 btrfs_set_file_extent_type(leaf, ei,
9470 BTRFS_FILE_EXTENT_INLINE);
9471 btrfs_set_file_extent_encryption(leaf, ei, 0);
9472 btrfs_set_file_extent_compression(leaf, ei, 0);
9473 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9474 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9476 ptr = btrfs_file_extent_inline_start(ei);
9477 write_extent_buffer(leaf, symname, ptr, name_len);
9478 btrfs_mark_buffer_dirty(leaf);
9479 btrfs_free_path(path);
9481 d_instantiate_new(dentry, inode);
9484 btrfs_end_transaction(trans);
9485 btrfs_btree_balance_dirty(fs_info);
9487 btrfs_new_inode_args_destroy(&new_inode_args);
9494 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9495 struct btrfs_trans_handle *trans_in,
9496 struct btrfs_inode *inode,
9497 struct btrfs_key *ins,
9500 struct btrfs_file_extent_item stack_fi;
9501 struct btrfs_replace_extent_info extent_info;
9502 struct btrfs_trans_handle *trans = trans_in;
9503 struct btrfs_path *path;
9504 u64 start = ins->objectid;
9505 u64 len = ins->offset;
9506 int qgroup_released;
9509 memset(&stack_fi, 0, sizeof(stack_fi));
9511 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9512 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9513 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9514 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9515 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9516 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9517 /* Encryption and other encoding is reserved and all 0 */
9519 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
9520 if (qgroup_released < 0)
9521 return ERR_PTR(qgroup_released);
9524 ret = insert_reserved_file_extent(trans, inode,
9525 file_offset, &stack_fi,
9526 true, qgroup_released);
9532 extent_info.disk_offset = start;
9533 extent_info.disk_len = len;
9534 extent_info.data_offset = 0;
9535 extent_info.data_len = len;
9536 extent_info.file_offset = file_offset;
9537 extent_info.extent_buf = (char *)&stack_fi;
9538 extent_info.is_new_extent = true;
9539 extent_info.update_times = true;
9540 extent_info.qgroup_reserved = qgroup_released;
9541 extent_info.insertions = 0;
9543 path = btrfs_alloc_path();
9549 ret = btrfs_replace_file_extents(inode, path, file_offset,
9550 file_offset + len - 1, &extent_info,
9552 btrfs_free_path(path);
9559 * We have released qgroup data range at the beginning of the function,
9560 * and normally qgroup_released bytes will be freed when committing the transaction.
9562 * But if we error out early, we have to free what we have released
9563 * or we leak qgroup data reservation.
9565 btrfs_qgroup_free_refroot(inode->root->fs_info,
9566 inode->root->root_key.objectid, qgroup_released,
9567 BTRFS_QGROUP_RSV_DATA);
9568 return ERR_PTR(ret);
9571 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9572 u64 start, u64 num_bytes, u64 min_size,
9573 loff_t actual_len, u64 *alloc_hint,
9574 struct btrfs_trans_handle *trans)
9576 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9577 struct extent_map *em;
9578 struct btrfs_root *root = BTRFS_I(inode)->root;
9579 struct btrfs_key ins;
9580 u64 cur_offset = start;
9581 u64 clear_offset = start;
9584 u64 last_alloc = (u64)-1;
9586 bool own_trans = true;
9587 u64 end = start + num_bytes - 1;
9591 while (num_bytes > 0) {
9592 cur_bytes = min_t(u64, num_bytes, SZ_256M);
9593 cur_bytes = max(cur_bytes, min_size);
9595 * If we are severely fragmented we could end up with really
9596 * small allocations, so if the allocator is returning small
9597 * chunks, let's make its job easier by only searching for those sized chunks.
9600 cur_bytes = min(cur_bytes, last_alloc);
9601 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9602 min_size, 0, *alloc_hint, &ins, 1, 0);
9607 * We've reserved this space, and thus converted it from
9608 * ->bytes_may_use to ->bytes_reserved. Any error that happens
9609 * from here on out we will only need to clear our reservation
9610 * for the remaining unreserved area, so advance our
9611 * clear_offset by our extent size.
9613 clear_offset += ins.offset;
9615 last_alloc = ins.offset;
9616 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9619 * Now that we inserted the prealloc extent we can finally
9620 * decrement the number of reservations in the block group.
9621 * If we did it before, we could race with relocation and have
9622 * relocation miss the reserved extent, making it fail later.
9624 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9625 if (IS_ERR(trans)) {
9626 ret = PTR_ERR(trans);
9627 btrfs_free_reserved_extent(fs_info, ins.objectid,
9632 em = alloc_extent_map();
9634 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9635 cur_offset + ins.offset - 1, false);
9636 btrfs_set_inode_full_sync(BTRFS_I(inode));
9640 em->start = cur_offset;
9641 em->orig_start = cur_offset;
9642 em->len = ins.offset;
9643 em->block_start = ins.objectid;
9644 em->block_len = ins.offset;
9645 em->orig_block_len = ins.offset;
9646 em->ram_bytes = ins.offset;
9647 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
9648 em->generation = trans->transid;
9650 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9651 free_extent_map(em);
9653 num_bytes -= ins.offset;
9654 cur_offset += ins.offset;
9655 *alloc_hint = ins.objectid + ins.offset;
9657 inode_inc_iversion(inode);
9658 inode->i_ctime = current_time(inode);
9659 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9660 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9661 (actual_len > inode->i_size) &&
9662 (cur_offset > inode->i_size)) {
9663 if (cur_offset > actual_len)
9664 i_size = actual_len;
9666 i_size = cur_offset;
9667 i_size_write(inode, i_size);
9668 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9671 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
9674 btrfs_abort_transaction(trans, ret);
9676 btrfs_end_transaction(trans);
9681 btrfs_end_transaction(trans);
9685 if (clear_offset < end)
9686 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9687 end - clear_offset + 1);
9691 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9692 u64 start, u64 num_bytes, u64 min_size,
9693 loff_t actual_len, u64 *alloc_hint)
9695 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9696 min_size, actual_len, alloc_hint,
9700 int btrfs_prealloc_file_range_trans(struct inode *inode,
9701 struct btrfs_trans_handle *trans, int mode,
9702 u64 start, u64 num_bytes, u64 min_size,
9703 loff_t actual_len, u64 *alloc_hint)
9705 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9706 min_size, actual_len, alloc_hint, trans);
9709 static int btrfs_permission(struct mnt_idmap *idmap,
9710 struct inode *inode, int mask)
9712 struct btrfs_root *root = BTRFS_I(inode)->root;
9713 umode_t mode = inode->i_mode;
9715 if (mask & MAY_WRITE &&
9716 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9717 if (btrfs_root_readonly(root))
9719 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9722 return generic_permission(idmap, inode, mask);
9725 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9726 struct file *file, umode_t mode)
9728 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9729 struct btrfs_trans_handle *trans;
9730 struct btrfs_root *root = BTRFS_I(dir)->root;
9731 struct inode *inode;
9732 struct btrfs_new_inode_args new_inode_args = {
9734 .dentry = file->f_path.dentry,
9737 unsigned int trans_num_items;
9740 inode = new_inode(dir->i_sb);
9743 inode_init_owner(idmap, inode, dir, mode);
9744 inode->i_fop = &btrfs_file_operations;
9745 inode->i_op = &btrfs_file_inode_operations;
9746 inode->i_mapping->a_ops = &btrfs_aops;
9748 new_inode_args.inode = inode;
9749 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9753 trans = btrfs_start_transaction(root, trans_num_items);
9754 if (IS_ERR(trans)) {
9755 ret = PTR_ERR(trans);
9756 goto out_new_inode_args;
9759 ret = btrfs_create_new_inode(trans, &new_inode_args);
9762 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9763 * set it to 1 because d_tmpfile() will issue a warning if the count is
9764 * 0, through:
9766 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9768 set_nlink(inode, 1);
9771 d_tmpfile(file, inode);
9772 unlock_new_inode(inode);
9773 mark_inode_dirty(inode);
9776 btrfs_end_transaction(trans);
9777 btrfs_btree_balance_dirty(fs_info);
9779 btrfs_new_inode_args_destroy(&new_inode_args);
9783 return finish_open_simple(file, ret);
9786 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
9788 struct btrfs_fs_info *fs_info = inode->root->fs_info;
9789 unsigned long index = start >> PAGE_SHIFT;
9790 unsigned long end_index = end >> PAGE_SHIFT;
9794 ASSERT(end + 1 - start <= U32_MAX);
9795 len = end + 1 - start;
9796 while (index <= end_index) {
9797 page = find_get_page(inode->vfs_inode.i_mapping, index);
9798 ASSERT(page); /* Pages should be in the extent_io_tree */
9800 btrfs_page_set_writeback(fs_info, page, start, len);
9806 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9809 switch (compress_type) {
9810 case BTRFS_COMPRESS_NONE:
9811 return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9812 case BTRFS_COMPRESS_ZLIB:
9813 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9814 case BTRFS_COMPRESS_LZO:
9816 * The LZO format depends on the sector size. 64K is the maximum
9817 * sector size that we support.
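* The returned value encodes the sector size: sectorsize_bits of 12
* (4K) maps to BTRFS_ENCODED_IO_COMPRESSION_LZO_4K, 13 (8K) to
* the _LZO_8K value, and so on up to 16 (64K) for _LZO_64K.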
9819 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9821 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9822 (fs_info->sectorsize_bits - 12);
9823 case BTRFS_COMPRESS_ZSTD:
9824 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9830 static ssize_t btrfs_encoded_read_inline(
9832 struct iov_iter *iter, u64 start,
9834 struct extent_state **cached_state,
9835 u64 extent_start, size_t count,
9836 struct btrfs_ioctl_encoded_io_args *encoded,
9839 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9840 struct btrfs_root *root = inode->root;
9841 struct btrfs_fs_info *fs_info = root->fs_info;
9842 struct extent_io_tree *io_tree = &inode->io_tree;
9843 struct btrfs_path *path;
9844 struct extent_buffer *leaf;
9845 struct btrfs_file_extent_item *item;
9851 path = btrfs_alloc_path();
9856 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9860 /* The extent item disappeared? */
9865 leaf = path->nodes[0];
9866 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9868 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9869 ptr = btrfs_file_extent_inline_start(item);
9871 encoded->len = min_t(u64, extent_start + ram_bytes,
9872 inode->vfs_inode.i_size) - iocb->ki_pos;
9873 ret = btrfs_encoded_io_compression_from_extent(fs_info,
9874 btrfs_file_extent_compression(leaf, item));
9877 encoded->compression = ret;
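/*
 * Note (annotation): compressed inline data must be returned whole,
 * since the caller is responsible for decompressing it; uncompressed
 * data can be sliced at the requested offset.
 */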
9878 if (encoded->compression) {
9881 inline_size = btrfs_file_extent_inline_item_len(leaf,
9883 if (inline_size > count) {
9887 count = inline_size;
9888 encoded->unencoded_len = ram_bytes;
9889 encoded->unencoded_offset = iocb->ki_pos - extent_start;
9891 count = min_t(u64, count, encoded->len);
9892 encoded->len = count;
9893 encoded->unencoded_len = count;
9894 ptr += iocb->ki_pos - extent_start;
9897 tmp = kmalloc(count, GFP_NOFS);
9902 read_extent_buffer(leaf, tmp, ptr, count);
9903 btrfs_release_path(path);
9904 unlock_extent(io_tree, start, lockend, cached_state);
9905 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9908 ret = copy_to_iter(tmp, count, iter);
9913 btrfs_free_path(path);
9917 struct btrfs_encoded_read_private {
9918 wait_queue_head_t wait;
9920 blk_status_t status;
9923 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9925 struct btrfs_encoded_read_private *priv = bbio->private;
9927 if (bbio->bio.bi_status) {
9929 * The memory barrier implied by the atomic_dec_return() here
9930 * pairs with the memory barrier implied by the
9931 * atomic_dec_return() or io_wait_event() in
9932 * btrfs_encoded_read_regular_fill_pages() to ensure that this
9933 * write is observed before the load of status in
9934 * btrfs_encoded_read_regular_fill_pages().
9936 WRITE_ONCE(priv->status, bbio->bio.bi_status);
9938 if (!atomic_dec_return(&priv->pending))
9939 wake_up(&priv->wait);
9940 bio_put(&bbio->bio);
9943 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9944 u64 file_offset, u64 disk_bytenr,
9945 u64 disk_io_size, struct page **pages)
9947 struct btrfs_fs_info *fs_info = inode->root->fs_info;
9948 struct btrfs_encoded_read_private priv = {
9949 .pending = ATOMIC_INIT(1),
9951 unsigned long i = 0;
9952 struct btrfs_bio *bbio;
9954 init_waitqueue_head(&priv.wait);
9956 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9957 btrfs_encoded_read_endio, &priv);
9958 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9959 bbio->inode = inode;
9962 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9964 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9965 atomic_inc(&priv.pending);
9966 btrfs_submit_bio(bbio, 0);
9968 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9969 btrfs_encoded_read_endio, &priv);
9970 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9971 bbio->inode = inode;
9976 disk_bytenr += bytes;
9977 disk_io_size -= bytes;
9978 } while (disk_io_size);
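/*
 * Submit the last bio. The extra reference taken at initialization
 * (pending = 1) is dropped by the atomic_dec_return() below, so we
 * only sleep if bios are still in flight.
 */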
9980 atomic_inc(&priv.pending);
9981 btrfs_submit_bio(bbio, 0);
9983 if (atomic_dec_return(&priv.pending))
9984 io_wait_event(priv.wait, !atomic_read(&priv.pending));
9985 /* See btrfs_encoded_read_endio() for ordering. */
9986 return blk_status_to_errno(READ_ONCE(priv.status));
9989 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
9990 struct iov_iter *iter,
9991 u64 start, u64 lockend,
9992 struct extent_state **cached_state,
9993 u64 disk_bytenr, u64 disk_io_size,
9994 size_t count, bool compressed,
9997 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9998 struct extent_io_tree *io_tree = &inode->io_tree;
9999 struct page **pages;
10000 unsigned long nr_pages, i;
10002 size_t page_offset;
10005 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10006 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10009 ret = btrfs_alloc_page_array(nr_pages, pages);
10015 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10016 disk_io_size, pages);
10020 unlock_extent(io_tree, start, lockend, cached_state);
10021 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10028 i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10029 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10032 while (cur < count) {
10033 size_t bytes = min_t(size_t, count - cur,
10034 PAGE_SIZE - page_offset);
10036 if (copy_page_to_iter(pages[i], page_offset, bytes,
10047 for (i = 0; i < nr_pages; i++) {
10049 __free_page(pages[i]);
10055 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10056 struct btrfs_ioctl_encoded_io_args *encoded)
10058 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10059 struct btrfs_fs_info *fs_info = inode->root->fs_info;
10060 struct extent_io_tree *io_tree = &inode->io_tree;
10062 size_t count = iov_iter_count(iter);
10063 u64 start, lockend, disk_bytenr, disk_io_size;
10064 struct extent_state *cached_state = NULL;
10065 struct extent_map *em;
10066 bool unlocked = false;
10068 file_accessed(iocb->ki_filp);
10070 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
10072 if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10073 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10076 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10078 * We don't know how long the extent containing iocb->ki_pos is, but if
10079 * it's compressed we know that it won't be longer than this.
10081 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10084 struct btrfs_ordered_extent *ordered;
10086 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10087 lockend - start + 1);
10089 goto out_unlock_inode;
10090 lock_extent(io_tree, start, lockend, &cached_state);
10091 ordered = btrfs_lookup_ordered_range(inode, start,
10092 lockend - start + 1);
10095 btrfs_put_ordered_extent(ordered);
10096 unlock_extent(io_tree, start, lockend, &cached_state);
10100 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
10103 goto out_unlock_extent;
10106 if (em->block_start == EXTENT_MAP_INLINE) {
10107 u64 extent_start = em->start;
10110 * For inline extents we get everything we need out of the extent item.
10113 free_extent_map(em);
10115 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10116 &cached_state, extent_start,
10117 count, encoded, &unlocked);
10122 * We only want to return up to EOF even if the extent extends beyond it.
10125 encoded->len = min_t(u64, extent_map_end(em),
10126 inode->vfs_inode.i_size) - iocb->ki_pos;
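/*
 * Note (annotation): three cases follow — holes and prealloc extents
 * are returned as zeroes, compressed extents are returned whole, and
 * regular extents are sliced at the requested offset.
 */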
10127 if (em->block_start == EXTENT_MAP_HOLE ||
10128 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
10129 disk_bytenr = EXTENT_MAP_HOLE;
10130 count = min_t(u64, count, encoded->len);
10131 encoded->len = count;
10132 encoded->unencoded_len = count;
10133 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10134 disk_bytenr = em->block_start;
10136 * Bail if the buffer isn't large enough to return the whole
10137 * compressed extent.
10139 if (em->block_len > count) {
10143 disk_io_size = em->block_len;
10144 count = em->block_len;
10145 encoded->unencoded_len = em->ram_bytes;
10146 encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10147 ret = btrfs_encoded_io_compression_from_extent(fs_info,
10148 em->compress_type);
10151 encoded->compression = ret;
10153 disk_bytenr = em->block_start + (start - em->start);
10154 if (encoded->len > count)
10155 encoded->len = count;
10157 * Don't read beyond what we locked. This also limits the page
10158 * allocations that we'll do.
10160 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10161 count = start + disk_io_size - iocb->ki_pos;
10162 encoded->len = count;
10163 encoded->unencoded_len = count;
10164 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10166 free_extent_map(em);
10169 if (disk_bytenr == EXTENT_MAP_HOLE) {
10170 unlock_extent(io_tree, start, lockend, &cached_state);
10171 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10173 ret = iov_iter_zero(count, iter);
10177 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10178 &cached_state, disk_bytenr,
10179 disk_io_size, count,
10180 encoded->compression,
10186 iocb->ki_pos += encoded->len;
10188 free_extent_map(em);
10191 unlock_extent(io_tree, start, lockend, &cached_state);
10194 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10198 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10199 const struct btrfs_ioctl_encoded_io_args *encoded)
10201 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10202 struct btrfs_root *root = inode->root;
10203 struct btrfs_fs_info *fs_info = root->fs_info;
10204 struct extent_io_tree *io_tree = &inode->io_tree;
10205 struct extent_changeset *data_reserved = NULL;
10206 struct extent_state *cached_state = NULL;
10207 struct btrfs_ordered_extent *ordered;
10211 u64 num_bytes, ram_bytes, disk_num_bytes;
10212 unsigned long nr_pages, i;
10213 struct page **pages;
10214 struct btrfs_key ins;
10215 bool extent_reserved = false;
10216 struct extent_map *em;
10219 switch (encoded->compression) {
10220 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10221 compression = BTRFS_COMPRESS_ZLIB;
10223 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10224 compression = BTRFS_COMPRESS_ZSTD;
10226 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10227 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10228 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10229 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10230 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10231 /* The sector size must match for LZO. */
10232 if (encoded->compression -
10233 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10234 fs_info->sectorsize_bits)
10236 compression = BTRFS_COMPRESS_LZO;
10241 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10244 orig_count = iov_iter_count(from);
10246 /* The extent size must be sane. */
10247 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10248 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10252 * The compressed data must be smaller than the decompressed data.
10254 * It's of course possible for data to compress to larger or the same
10255 * size, but the buffered I/O path falls back to no compression for such
10256 * data, and we don't want to break any assumptions by creating these extents.
10259 * Note that this is less strict than the current check we have that the
10260 * compressed data must be at least one sector smaller than the
10261 * decompressed data. We only want to enforce the weaker requirement
10262 * from old kernels that it is at least one byte smaller.
10264 if (orig_count >= encoded->unencoded_len)
10267 /* The extent must start on a sector boundary. */
10268 start = iocb->ki_pos;
10269 if (!IS_ALIGNED(start, fs_info->sectorsize))
10273 * The extent must end on a sector boundary. However, we allow a write
10274 * which ends at or extends i_size to have an unaligned length; we round
10275 * up the extent size and set i_size to the unaligned end.
10277 if (start + encoded->len < inode->vfs_inode.i_size &&
10278 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10281 /* Finally, the offset in the unencoded data must be sector-aligned. */
10282 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10285 num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10286 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10287 end = start + num_bytes - 1;
10290 * If the extent cannot be inline, the compressed data on disk must be
10291 * sector-aligned. For convenience, we extend it with zeroes if it isn't.
10294 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10295 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10296 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10299 for (i = 0; i < nr_pages; i++) {
10300 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10303 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10308 kaddr = kmap_local_page(pages[i]);
10309 if (copy_from_iter(kaddr, bytes, from) != bytes) {
10310 kunmap_local(kaddr);
10314 if (bytes < PAGE_SIZE)
10315 memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10316 kunmap_local(kaddr);
10320 struct btrfs_ordered_extent *ordered;
10322 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10325 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10326 start >> PAGE_SHIFT,
10327 end >> PAGE_SHIFT);
10330 lock_extent(io_tree, start, end, &cached_state);
10331 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10333 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10336 btrfs_put_ordered_extent(ordered);
10337 unlock_extent(io_tree, start, end, &cached_state);
10342 * We don't use the higher-level delalloc space functions because our
10343 * num_bytes and disk_num_bytes are different.
10345 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10348 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10350 goto out_free_data_space;
10351 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10354 goto out_qgroup_free_data;
10356 /* Try an inline extent first. */
10357 if (start == 0 && encoded->unencoded_len == encoded->len &&
10358 encoded->unencoded_offset == 0) {
10359 ret = cow_file_range_inline(inode, encoded->len, orig_count,
10360 compression, pages, true);
10364 goto out_delalloc_release;
10368 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10369 disk_num_bytes, 0, 0, &ins, 1, 1);
10371 goto out_delalloc_release;
10372 extent_reserved = true;
10374 em = create_io_em(inode, start, num_bytes,
10375 start - encoded->unencoded_offset, ins.objectid,
10376 ins.offset, ins.offset, ram_bytes, compression,
10377 BTRFS_ORDERED_COMPRESSED);
10380 goto out_free_reserved;
10382 free_extent_map(em);
10384 ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
10385 ins.objectid, ins.offset,
10386 encoded->unencoded_offset,
10387 (1 << BTRFS_ORDERED_ENCODED) |
10388 (1 << BTRFS_ORDERED_COMPRESSED),
10390 if (IS_ERR(ordered)) {
10391 btrfs_drop_extent_map_range(inode, start, end, false);
10392 ret = PTR_ERR(ordered);
10393 goto out_free_reserved;
10395 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10397 if (start + encoded->len > inode->vfs_inode.i_size)
10398 i_size_write(&inode->vfs_inode, start + encoded->len);
10400 unlock_extent(io_tree, start, end, &cached_state);
10402 btrfs_delalloc_release_extents(inode, num_bytes);
10404 btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
10409 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10410 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
10411 out_delalloc_release:
10412 btrfs_delalloc_release_extents(inode, num_bytes);
10413 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10414 out_qgroup_free_data:
10416 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
10417 out_free_data_space:
10419 * If btrfs_reserve_extent() succeeded, then we already decremented bytes_may_use.
10422 if (!extent_reserved)
10423 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
10425 unlock_extent(io_tree, start, end, &cached_state);
10427 for (i = 0; i < nr_pages; i++) {
10429 __free_page(pages[i]);
10434 iocb->ki_pos += encoded->len;
10440 * Add an entry indicating a block group or device which is pinned by a
10441 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10442 * negative errno on failure.
10444 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10445 bool is_block_group)
10447 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10448 struct btrfs_swapfile_pin *sp, *entry;
10449 struct rb_node **p;
10450 struct rb_node *parent = NULL;
10452 sp = kmalloc(sizeof(*sp), GFP_NOFS);
10457 sp->is_block_group = is_block_group;
10458 sp->bg_extent_count = 1;
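/*
 * Note (annotation): the tree below is ordered by (ptr, inode), so the
 * same block group or device can be pinned by multiple swapfiles.
 */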
10460 spin_lock(&fs_info->swapfile_pins_lock);
10461 p = &fs_info->swapfile_pins.rb_node;
10464 entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10465 if (sp->ptr < entry->ptr ||
10466 (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10467 p = &(*p)->rb_left;
10468 } else if (sp->ptr > entry->ptr ||
10469 (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10470 p = &(*p)->rb_right;
10472 if (is_block_group)
10473 entry->bg_extent_count++;
10474 spin_unlock(&fs_info->swapfile_pins_lock);
10479 rb_link_node(&sp->node, parent, p);
10480 rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10481 spin_unlock(&fs_info->swapfile_pins_lock);
10485 /* Free all of the entries pinned by this swapfile. */
10486 static void btrfs_free_swapfile_pins(struct inode *inode)
10488 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10489 struct btrfs_swapfile_pin *sp;
10490 struct rb_node *node, *next;
10492 spin_lock(&fs_info->swapfile_pins_lock);
10493 node = rb_first(&fs_info->swapfile_pins);
10495 next = rb_next(node);
10496 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10497 if (sp->inode == inode) {
10498 rb_erase(&sp->node, &fs_info->swapfile_pins);
10499 if (sp->is_block_group) {
10500 btrfs_dec_block_group_swap_extents(sp->ptr,
10501 sp->bg_extent_count);
10502 btrfs_put_block_group(sp->ptr);
10508 spin_unlock(&fs_info->swapfile_pins_lock);
10511 struct btrfs_swap_info {
10517 unsigned long nr_pages;
10521 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10522 struct btrfs_swap_info *bsi)
10524 unsigned long nr_pages;
10525 unsigned long max_pages;
10526 u64 first_ppage, first_ppage_reported, next_ppage;
10530 * Our swapfile may have had its size extended after the swap header was
10531 * written. In that case activating the swapfile should not go beyond
10532 * the max size set in the swap header.
10534 if (bsi->nr_pages >= sis->max)
10537 max_pages = sis->max - bsi->nr_pages;
10538 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
10539 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
10541 if (first_ppage >= next_ppage)
10543 nr_pages = next_ppage - first_ppage;
10544 nr_pages = min(nr_pages, max_pages);
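/*
 * Note (annotation): the swap header occupies the first page of the
 * file, so when this extent starts at offset 0 that page is skipped in
 * the reported range.
 */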
10546 first_ppage_reported = first_ppage;
10547 if (bsi->start == 0)
10548 first_ppage_reported++;
10549 if (bsi->lowest_ppage > first_ppage_reported)
10550 bsi->lowest_ppage = first_ppage_reported;
10551 if (bsi->highest_ppage < (next_ppage - 1))
10552 bsi->highest_ppage = next_ppage - 1;
10554 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10557 bsi->nr_extents += ret;
10558 bsi->nr_pages += nr_pages;
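
/*
 * Worked example of the rounding above, assuming 4K pages (a sketch with
 * made-up numbers): a physical run starting at 0x3200 with length 0x5000
 * covers bytes [0x3200, 0x8200). PAGE_ALIGN(0x3200) >> PAGE_SHIFT = 4 and
 * PAGE_ALIGN_DOWN(0x8200) >> PAGE_SHIFT = 8, so only the fully covered
 * physical pages 4-7 are handed to add_swap_extent(); the partial pages at
 * either end are skipped.
 */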

static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}

	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but still
	 * not removed yet. To prevent this race, we check the root status
	 * before activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
		"cannot activate swapfile because subvolume %llu is being deleted",
			root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
			   bg->start,
			   atomic_read(&fs_info->scrubs_running) ?
				       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
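
/*
 * In practice the checks above mean a swapfile on btrfs has to be created
 * NOCOW and fully allocated before mkswap/swapon, e.g. (an illustrative
 * command sequence, matching the constraints enforced above):
 *
 *	truncate -s 0 swapfile
 *	chattr +C swapfile
 *	fallocate -l 2G swapfile
 *	mkswap swapfile
 *	swapon swapfile
 */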
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
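
/*
 * A concrete illustration of why the sub+add pair above sits under one
 * lock: when a clone replaces a 1M extent with another 1M extent, a
 * concurrent stat(2) must never observe the intermediate state where the
 * old bytes were subtracted but the new ones not yet added, which would
 * briefly report the file as 1M smaller than it really is.
 */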

/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value plus 1 should
 *           be sector size aligned.
 *
 * This should typically be used for cases where we have locked the inode's VFS
 * lock in exclusive mode, locked the inode's i_mmap_lock in exclusive mode,
 * flushed all delalloc in the range, waited for all ordered extents in the
 * range to complete and finally locked the file range in the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
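
/*
 * Illustrative caller pattern (a sketch with a made-up range): after
 * flushing delalloc in [0, 64K - 1], waiting for ordered extents there and
 * locking that range in the io_tree, a caller can check its assumptions
 * with:
 *
 *	btrfs_assert_inode_range_clean(inode, 0, SZ_64K - 1);
 *
 * which is effectively a no-op unless CONFIG_BTRFS_ASSERT is enabled.
 */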

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO, and they would also change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};
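
/*
 * Concretely, the legacy userspace interface to ->bmap is the FIBMAP
 * ioctl; an illustrative (userspace, not kernel) sketch:
 *
 *	int blk = 0;
 *	if (ioctl(fd, FIBMAP, &blk) < 0)
 *		perror("FIBMAP");	(fails on btrfs: no ->bmap)
 *
 * Even if it were implemented, the address it returned could be remapped
 * by a COW write moments later, which is exactly the corruption scenario
 * described above.
 */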

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};