1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
16 #include "print-tree.h"
18 #include "compression.h"
20 #include "block-group.h"
21 #include "space-info.h"
24 /* magic values for the inode_only field in btrfs_log_inode:
26 * LOG_INODE_ALL means to log everything
27 * LOG_INODE_EXISTS means to log just enough to recreate the inode
38 * directory trouble cases
40 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
41 * log, we must force a full commit before doing an fsync of the directory
42 * where the unlink was done.
43 * ---> record transid of last unlink/rename per directory (a sketch of
this check follows the forward declarations below)
47 * rename foo/some_dir foo2/some_dir
48 * mkdir foo/some_dir
49 * fsync foo/some_dir/some_file
51 * The fsync above will unlink the original some_dir without recording
52 * it in its new location (foo2). After a crash, some_dir will be gone
53 * unless the fsync of some_file forces a full commit
55 * 2) we must log any new names for any file or dir that is in the fsync
56 * log. ---> check inode while renaming/linking.
58 * 2a) we must log any new names for any file or dir during rename
59 * when the directory they are being removed from was logged.
60 * ---> check inode and old parent dir during rename
62 * 2a is actually the more important variant: without the extra logging,
63 * a crash might unlink the old name without recreating the new one
65 * 3) after a crash, we must go through any directories with a link count
66 * of zero and redo the rm -rf
73 * The directory f1 was fully removed from the FS, but fsync was never
74 * called on f1, only its parent dir. After a crash the rm -rf must
75 * be replayed. This must be able to recurse down the entire
76 * directory tree. The inode link count fixup code takes care of the ugly details.
81 * stages for the tree walking. The first
82 * stage (0) is to only pin down the blocks we find;
83 * the second stage (1) is to make sure that all the inodes
84 * we find in the log are created in the subvolume.
86 * The last stage is to deal with directories and links and extents
87 * and all the other fun semantics
91 LOG_WALK_REPLAY_INODES,
92 LOG_WALK_REPLAY_DIR_INDEX,
96 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
97 struct btrfs_root *root, struct btrfs_inode *inode,
99 struct btrfs_log_ctx *ctx);
100 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
101 struct btrfs_root *root,
102 struct btrfs_path *path, u64 objectid);
103 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
104 struct btrfs_root *root,
105 struct btrfs_root *log,
106 struct btrfs_path *path,
107 u64 dirid, int del_all);
108 static void wait_log_commit(struct btrfs_root *root, int transid);
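
/*
 * Illustrative sketch, not part of the original file: the helper name is
 * hypothetical. Trouble case 1 in the comment above boils down to
 * stamping each directory's btrfs_inode with the transid of its last
 * unlink/rename (last_unlink_trans); an fsync of the directory then
 * roughly checks:
 */
static inline bool example_dir_needs_full_commit(struct btrfs_inode *dir,
						 u64 transid)
{
	return dir->last_unlink_trans >= transid;
}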
111 * tree logging is a special write ahead log used to make sure that
112 * fsyncs and O_SYNCs can happen without doing full tree commits.
114 * Full tree commits are expensive because they require commonly
115 * modified blocks to be recowed, creating many dirty pages in the
116 * extent tree and a 4x-6x higher write load than ext3.
118 * Instead of doing a tree commit on every fsync, we use the
119 * key ranges and transaction ids to find items for a given file or directory
120 * that have changed in this transaction. Those items are copied into
121 * a special tree (one per subvolume root), that tree is written to disk
122 * and then the fsync is considered complete.
124 * After a crash, items are copied out of the log-tree back into the
125 * subvolume tree. Any file data extents found are recorded in the extent
126 * allocation tree, and the log-tree freed.
128 * The log tree is read three times: once to pin down all the extents it is
129 * using in ram, once to create all the inodes logged in the tree
130 * and once to do all the other items.
134 * start a sub transaction and set up the log tree.
135 * this increments the log tree writer count to make the people
136 * syncing the tree wait for us to finish
138 static int start_log_trans(struct btrfs_trans_handle *trans,
139 struct btrfs_root *root,
140 struct btrfs_log_ctx *ctx)
142 struct btrfs_fs_info *fs_info = root->fs_info;
143 struct btrfs_root *tree_root = fs_info->tree_root;
144 const bool zoned = btrfs_is_zoned(fs_info);
146 bool created = false;
149 * First check if the log root tree was already created. If not, create
150 * it before locking the root's log_mutex, just to keep lockdep happy.
152 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
153 mutex_lock(&tree_root->log_mutex);
154 if (!fs_info->log_root_tree) {
155 ret = btrfs_init_log_root_tree(trans, fs_info);
157 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
161 mutex_unlock(&tree_root->log_mutex);
166 mutex_lock(&root->log_mutex);
169 if (root->log_root) {
170 int index = (root->log_transid + 1) % 2;
172 if (btrfs_need_log_full_commit(trans)) {
177 if (zoned && atomic_read(&root->log_commit[index])) {
178 wait_log_commit(root, root->log_transid - 1);
182 if (!root->log_start_pid) {
183 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
184 root->log_start_pid = current->pid;
185 } else if (root->log_start_pid != current->pid) {
186 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
190 * This means fs_info->log_root_tree was already created
191 * for some other FS trees. Do a full commit so that we don't mix
192 * nodes from multiple log transactions, which would break sequential writing.
195 if (zoned && !created) {
200 ret = btrfs_add_log_tree(trans, root);
204 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
205 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
206 root->log_start_pid = current->pid;
209 atomic_inc(&root->log_writers);
210 if (ctx && !ctx->logging_new_name) {
211 int index = root->log_transid % 2;
212 list_add_tail(&ctx->list, &root->log_ctxs[index]);
213 ctx->log_transid = root->log_transid;
217 mutex_unlock(&root->log_mutex);
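
/*
 * Illustrative sketch (assumed caller shape, error handling trimmed):
 * the logging entry points bracket their work between start_log_trans()
 * and btrfs_end_log_trans(), so a log commit can wait for every writer
 * through root->log_writers.
 */
static inline int example_log_caller(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_log_ctx *ctx)
{
	int ret;

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		return ret;
	/* ... copy the inode's items into root->log_root here ... */
	btrfs_end_log_trans(root);
	return 0;
}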
222 * returns 0 if there was a log transaction running and we were able
223 * to join, or returns -ENOENT if there were no transactions in progress
226 static int join_running_log_trans(struct btrfs_root *root)
228 const bool zoned = btrfs_is_zoned(root->fs_info);
231 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
234 mutex_lock(&root->log_mutex);
236 if (root->log_root) {
237 int index = (root->log_transid + 1) % 2;
240 if (zoned && atomic_read(&root->log_commit[index])) {
241 wait_log_commit(root, root->log_transid - 1);
244 atomic_inc(&root->log_writers);
246 mutex_unlock(&root->log_mutex);
251 * This either makes the current running log transaction wait
252 * until you call btrfs_end_log_trans() or it makes any future
253 * log transactions wait until you call btrfs_end_log_trans()
255 void btrfs_pin_log_trans(struct btrfs_root *root)
257 atomic_inc(&root->log_writers);
261 * indicate we're done making changes to the log tree
262 * and wake up anyone waiting to do a sync
264 void btrfs_end_log_trans(struct btrfs_root *root)
266 if (atomic_dec_and_test(&root->log_writers)) {
267 /* atomic_dec_and_test implies a barrier */
268 cond_wake_up_nomb(&root->log_writer_wait);
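
/*
 * Illustrative sketch: callers such as the rename path that must keep a
 * running log transaction alive across an operation pair the two helpers
 * like this.
 */
static inline void example_pin_log_around_op(struct btrfs_root *root)
{
	btrfs_pin_log_trans(root);
	/* ... work that must not race with a log commit ... */
	btrfs_end_log_trans(root);
}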
272 static int btrfs_write_tree_block(struct extent_buffer *buf)
274 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
275 buf->start + buf->len - 1);
278 static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
280 filemap_fdatawait_range(buf->pages[0]->mapping,
281 buf->start, buf->start + buf->len - 1);
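
/*
 * Illustrative sketch (assumes @buf is a dirty log tree block): a full
 * flush is a write followed by a wait, which is exactly how
 * process_one_buffer() below combines the two helpers.
 */
static inline void example_flush_log_block(struct extent_buffer *buf)
{
	btrfs_write_tree_block(buf);
	btrfs_wait_tree_block_writeback(buf);
}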
285 * the walk control struct is used to pass state down the chain when
286 * processing the log tree. The stage field tells us which part
287 * of the log tree processing we are currently doing. The others
288 * are state fields used for that specific part
290 struct walk_control {
291 /* should we free the extent on disk when done? This is used
292 * at transaction commit time while freeing a log tree
296 /* should we write out the extent buffer? This is used
297 * while flushing the log tree to disk during a sync
301 /* should we wait for the extent buffer io to finish? Also used
302 * while flushing the log tree to disk for a sync
306 /* pin only walk, we record which extents on disk belong to the
311 /* what stage of the replay code we're currently in */
315 * Ignore any items from the inode currently being processed. Needs
316 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
317 * the LOG_WALK_REPLAY_INODES stage.
319 bool ignore_cur_inode;
321 /* the root we are currently replaying */
322 struct btrfs_root *replay_dest;
324 /* the trans handle for the current replay */
325 struct btrfs_trans_handle *trans;
327 /* the function that gets used to process blocks we find in the
328 * tree. Note the extent_buffer might not be up to date when it is
329 * passed in, and it must be checked or read if you need the data
332 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
333 struct walk_control *wc, u64 gen, int level);
337 * process_func used to pin down extents, write them or wait on them
339 static int process_one_buffer(struct btrfs_root *log,
340 struct extent_buffer *eb,
341 struct walk_control *wc, u64 gen, int level)
343 struct btrfs_fs_info *fs_info = log->fs_info;
347 * If this fs is mixed then we need to be able to process the leaves to
348 * pin down any logged extents, so we have to read the block.
350 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
351 ret = btrfs_read_buffer(eb, gen, level, NULL);
357 ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
360 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
361 if (wc->pin && btrfs_header_level(eb) == 0)
362 ret = btrfs_exclude_logged_extents(eb);
364 btrfs_write_tree_block(eb);
366 btrfs_wait_tree_block_writeback(eb);
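
/*
 * Illustrative sketch (field names from struct walk_control above;
 * LOG_WALK_PIN_ONLY is assumed to be the stage-0 enum value described in
 * the walk stages comment): the first replay pass would be set up like
 * this before walking the log tree.
 */
static inline void example_setup_pin_walk(struct btrfs_trans_handle *trans)
{
	struct walk_control wc = {
		.pin = 1,
		.trans = trans,
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	/* a real caller would now walk the log tree with &wc */
	(void)wc;
}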
372 * Item overwrite used by replay and tree logging. eb, slot and key all refer
373 * to the src data we are copying out.
375 * root is the tree we are copying into, and path is a scratch
376 * path for use in this function (it should be released on entry and
377 * will be released on exit).
379 * If the key is already in the destination tree the existing item is
380 * overwritten. If the existing item isn't big enough, it is extended.
381 * If it is too large, it is truncated.
383 * If the key isn't in the destination yet, a new item is inserted.
385 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
386 struct btrfs_root *root,
387 struct btrfs_path *path,
388 struct extent_buffer *eb, int slot,
389 struct btrfs_key *key)
393 u64 saved_i_size = 0;
394 int save_old_i_size = 0;
395 unsigned long src_ptr;
396 unsigned long dst_ptr;
397 int overwrite_root = 0;
398 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
400 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
403 item_size = btrfs_item_size_nr(eb, slot);
404 src_ptr = btrfs_item_ptr_offset(eb, slot);
406 /* look for the key in the destination tree */
407 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
414 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
416 if (dst_size != item_size)
419 if (item_size == 0) {
420 btrfs_release_path(path);
423 dst_copy = kmalloc(item_size, GFP_NOFS);
424 src_copy = kmalloc(item_size, GFP_NOFS);
425 if (!dst_copy || !src_copy) {
426 btrfs_release_path(path);
432 read_extent_buffer(eb, src_copy, src_ptr, item_size);
434 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
435 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
437 ret = memcmp(dst_copy, src_copy, item_size);
442 * they have the same contents, just return, this saves
443 * us from cowing blocks in the destination tree and doing
444 * extra writes that may not have been done by a previous sync
448 btrfs_release_path(path);
453 * We need to load the old nbytes into the inode so when we
454 * replay the extents we've logged we get the right nbytes.
457 struct btrfs_inode_item *item;
461 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
462 struct btrfs_inode_item);
463 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
464 item = btrfs_item_ptr(eb, slot,
465 struct btrfs_inode_item);
466 btrfs_set_inode_nbytes(eb, item, nbytes);
469 * If this is a directory we need to reset the i_size to
470 * 0 so that we can set it up properly when replaying
471 * the rest of the items in this log.
473 mode = btrfs_inode_mode(eb, item);
475 btrfs_set_inode_size(eb, item, 0);
477 } else if (inode_item) {
478 struct btrfs_inode_item *item;
482 * New inode, set nbytes to 0 so that nbytes comes out
483 * properly when we replay the extents.
485 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
486 btrfs_set_inode_nbytes(eb, item, 0);
489 * If this is a directory we need to reset the i_size to 0 so
490 * that we can set it up properly when replaying the rest of
491 * the items in this log.
493 mode = btrfs_inode_mode(eb, item);
495 btrfs_set_inode_size(eb, item, 0);
498 btrfs_release_path(path);
499 /* try to insert the key into the destination tree */
500 path->skip_release_on_error = 1;
501 ret = btrfs_insert_empty_item(trans, root, path,
503 path->skip_release_on_error = 0;
505 /* make sure any existing item is the correct size */
506 if (ret == -EEXIST || ret == -EOVERFLOW) {
508 found_size = btrfs_item_size_nr(path->nodes[0],
510 if (found_size > item_size)
511 btrfs_truncate_item(path, item_size, 1);
512 else if (found_size < item_size)
513 btrfs_extend_item(path, item_size - found_size);
517 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
520 /* don't overwrite an existing inode if the generation number
521 * was logged as zero. This is done when the tree logging code
522 * is just logging an inode to make sure it exists after recovery.
524 * Also, don't overwrite i_size on directories during replay.
525 * log replay inserts and removes directory items based on the
526 * state of the tree found in the subvolume, and i_size is modified as it goes.
529 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
530 struct btrfs_inode_item *src_item;
531 struct btrfs_inode_item *dst_item;
533 src_item = (struct btrfs_inode_item *)src_ptr;
534 dst_item = (struct btrfs_inode_item *)dst_ptr;
536 if (btrfs_inode_generation(eb, src_item) == 0) {
537 struct extent_buffer *dst_eb = path->nodes[0];
538 const u64 ino_size = btrfs_inode_size(eb, src_item);
541 * For regular files an ino_size == 0 is used only when
542 * logging that an inode exists, as part of a directory
543 * fsync, and the inode wasn't fsynced before. In this
544 * case don't set the size of the inode in the fs/subvol
545 * tree, otherwise we would be throwing valid data away.
547 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
548 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
550 btrfs_set_inode_size(dst_eb, dst_item, ino_size);
554 if (overwrite_root &&
555 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
556 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
558 saved_i_size = btrfs_inode_size(path->nodes[0],
563 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
566 if (save_old_i_size) {
567 struct btrfs_inode_item *dst_item;
568 dst_item = (struct btrfs_inode_item *)dst_ptr;
569 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
572 /* make sure the generation is filled in */
573 if (key->type == BTRFS_INODE_ITEM_KEY) {
574 struct btrfs_inode_item *dst_item;
575 dst_item = (struct btrfs_inode_item *)dst_ptr;
576 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
577 btrfs_set_inode_generation(path->nodes[0], dst_item,
582 btrfs_mark_buffer_dirty(path->nodes[0]);
583 btrfs_release_path(path);
588 * simple helper to read an inode off the disk from a given root
589 * This can only be called for subvolume roots and not for the log
591 static noinline struct inode *read_one_inode(struct btrfs_root *root,
596 inode = btrfs_iget(root->fs_info->sb, objectid, root);
602 /* replays a single extent in 'eb' at 'slot' with 'key' into the
603 * subvolume 'root'. path is released on entry and should be released on exit.
606 * extents in the log tree have not been allocated out of the extent
607 * tree yet. So, this completes the allocation, taking a reference
608 * as required if the extent already exists or creating a new extent
609 * if it isn't in the extent allocation tree yet.
611 * The extent is inserted into the file, dropping any existing extents
612 * from the file that overlap the new one.
614 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
615 struct btrfs_root *root,
616 struct btrfs_path *path,
617 struct extent_buffer *eb, int slot,
618 struct btrfs_key *key)
620 struct btrfs_drop_extents_args drop_args = { 0 };
621 struct btrfs_fs_info *fs_info = root->fs_info;
624 u64 start = key->offset;
626 struct btrfs_file_extent_item *item;
627 struct inode *inode = NULL;
631 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
632 found_type = btrfs_file_extent_type(eb, item);
634 if (found_type == BTRFS_FILE_EXTENT_REG ||
635 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
636 nbytes = btrfs_file_extent_num_bytes(eb, item);
637 extent_end = start + nbytes;
640 * We don't add to the inode's nbytes if we are prealloc or a hole.
643 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
645 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
646 size = btrfs_file_extent_ram_bytes(eb, item);
647 nbytes = btrfs_file_extent_ram_bytes(eb, item);
648 extent_end = ALIGN(start + size,
649 fs_info->sectorsize);
655 inode = read_one_inode(root, key->objectid);
662 * first check to see if we already have this extent in the
663 * file. This must be done before btrfs_drop_extents runs,
664 * so we don't try to drop this extent.
666 ret = btrfs_lookup_file_extent(trans, root, path,
667 btrfs_ino(BTRFS_I(inode)), start, 0);
670 (found_type == BTRFS_FILE_EXTENT_REG ||
671 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
672 struct btrfs_file_extent_item cmp1;
673 struct btrfs_file_extent_item cmp2;
674 struct btrfs_file_extent_item *existing;
675 struct extent_buffer *leaf;
677 leaf = path->nodes[0];
678 existing = btrfs_item_ptr(leaf, path->slots[0],
679 struct btrfs_file_extent_item);
681 read_extent_buffer(eb, &cmp1, (unsigned long)item,
683 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
687 * we already have a pointer to this exact extent,
688 * we don't have to do anything
690 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
691 btrfs_release_path(path);
695 btrfs_release_path(path);
697 /* drop any overlapping extents */
698 drop_args.start = start;
699 drop_args.end = extent_end;
700 drop_args.drop_cache = true;
701 ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
705 if (found_type == BTRFS_FILE_EXTENT_REG ||
706 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
708 unsigned long dest_offset;
709 struct btrfs_key ins;
711 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
712 btrfs_fs_incompat(fs_info, NO_HOLES))
715 ret = btrfs_insert_empty_item(trans, root, path, key,
719 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
721 copy_extent_buffer(path->nodes[0], eb, dest_offset,
722 (unsigned long)item, sizeof(*item));
724 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
725 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
726 ins.type = BTRFS_EXTENT_ITEM_KEY;
727 offset = key->offset - btrfs_file_extent_offset(eb, item);
730 * Manually record the dirty extent, as here we did a shallow
731 * file extent item copy and skipped the normal backref update,
732 * modifying the extent tree all by ourselves.
733 * So we need to manually record the dirty extent for qgroups,
734 * as the owner of the file extent changed from the log tree
735 * (doesn't affect qgroups) to the fs/file tree (which does)
737 ret = btrfs_qgroup_trace_extent(trans,
738 btrfs_file_extent_disk_bytenr(eb, item),
739 btrfs_file_extent_disk_num_bytes(eb, item),
744 if (ins.objectid > 0) {
745 struct btrfs_ref ref = { 0 };
748 LIST_HEAD(ordered_sums);
751 * is this extent already allocated in the extent
752 * allocation tree? If so, just add a reference
754 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
758 } else if (ret == 0) {
759 btrfs_init_generic_ref(&ref,
760 BTRFS_ADD_DELAYED_REF,
761 ins.objectid, ins.offset, 0);
762 btrfs_init_data_ref(&ref,
763 root->root_key.objectid,
764 key->objectid, offset, 0, false);
765 ret = btrfs_inc_extent_ref(trans, &ref);
770 * insert the extent pointer in the extent
773 ret = btrfs_alloc_logged_file_extent(trans,
774 root->root_key.objectid,
775 key->objectid, offset, &ins);
779 btrfs_release_path(path);
781 if (btrfs_file_extent_compression(eb, item)) {
782 csum_start = ins.objectid;
783 csum_end = csum_start + ins.offset;
785 csum_start = ins.objectid +
786 btrfs_file_extent_offset(eb, item);
787 csum_end = csum_start +
788 btrfs_file_extent_num_bytes(eb, item);
791 ret = btrfs_lookup_csums_range(root->log_root,
792 csum_start, csum_end - 1,
797 * Now delete all existing csums in the csum root that
798 * cover our range. We do this because we can have an
799 * extent that is completely referenced by one file
800 * extent item and partially referenced by another
801 * file extent item (like after using the clone or
802 * extent_same ioctls). In this case if we end up doing
803 * the replay of the one that partially references the
804 * extent first, and we do not do the csum deletion
805 * below, we can get 2 csum items in the csum tree that
806 * overlap each other. For example, imagine our log has
807 * the two following file extent items:
809 * key (257 EXTENT_DATA 409600)
810 * extent data disk byte 12845056 nr 102400
811 * extent data offset 20480 nr 20480 ram 102400
813 * key (257 EXTENT_DATA 819200)
814 * extent data disk byte 12845056 nr 102400
815 * extent data offset 0 nr 102400 ram 102400
817 * Where the second one fully references the 100K extent
818 * that starts at disk byte 12845056, and the log tree
819 * has a single csum item that covers the entire range
822 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
824 * After the first file extent item is replayed, the
825 * csum tree gets the following csum item:
827 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
829 * Which covers the 20K sub-range starting at offset 20K
830 * of our extent. Now when we replay the second file
831 * extent item, if we do not delete existing csum items
832 * that cover any of its blocks, we end up getting two
833 * csum items in our csum tree that overlap each other:
835 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
836 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
838 * Which is a problem, because after this anyone trying
839 * to look up the checksum of any block of our
840 * extent starting at an offset of 40K or higher will
841 * end up looking at the second csum item only, which
842 * does not contain the checksum for any block starting
843 * at offset 40K or higher of our extent.
845 while (!list_empty(&ordered_sums)) {
846 struct btrfs_ordered_sum *sums;
847 sums = list_entry(ordered_sums.next,
848 struct btrfs_ordered_sum,
851 ret = btrfs_del_csums(trans,
856 ret = btrfs_csum_file_blocks(trans,
857 fs_info->csum_root, sums);
858 list_del(&sums->list);
864 btrfs_release_path(path);
866 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
867 /* inline extents are easy, we just overwrite them */
868 ret = overwrite_item(trans, root, path, eb, slot, key);
873 ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
879 btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
880 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
888 * when cleaning up conflicts between the directory names in the
889 * subvolume, directory names in the log and directory names in the
890 * inode back references, we may have to unlink inodes from directories.
892 * This is a helper function to do the unlink of a specific directory item.
895 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
896 struct btrfs_root *root,
897 struct btrfs_path *path,
898 struct btrfs_inode *dir,
899 struct btrfs_dir_item *di)
904 struct extent_buffer *leaf;
905 struct btrfs_key location;
908 leaf = path->nodes[0];
910 btrfs_dir_item_key_to_cpu(leaf, di, &location);
911 name_len = btrfs_dir_name_len(leaf, di);
912 name = kmalloc(name_len, GFP_NOFS);
916 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
917 btrfs_release_path(path);
919 inode = read_one_inode(root, location.objectid);
925 ret = link_to_fixup_dir(trans, root, path, location.objectid);
929 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
934 ret = btrfs_run_delayed_items(trans);
942 * See if a given name and sequence number found in an inode back reference are
943 * already in a directory and correctly point to this inode.
945 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it exists.
948 static noinline int inode_in_dir(struct btrfs_root *root,
949 struct btrfs_path *path,
950 u64 dirid, u64 objectid, u64 index,
951 const char *name, int name_len)
953 struct btrfs_dir_item *di;
954 struct btrfs_key location;
957 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
958 index, name, name_len, 0);
963 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
964 if (location.objectid != objectid)
970 btrfs_release_path(path);
971 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
976 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
977 if (location.objectid == objectid)
981 btrfs_release_path(path);
986 * helper function to check a log tree for a named back reference in
987 * an inode. This is used to decide if a back reference that is
988 * found in the subvolume conflicts with what we find in the log.
990 * inode backreferences may have multiple refs in a single item;
991 * during replay we process one reference at a time, and we don't
992 * want to delete valid links to a file from the subvolume if that
993 * link is also in the log.
995 static noinline int backref_in_log(struct btrfs_root *log,
996 struct btrfs_key *key,
998 const char *name, int namelen)
1000 struct btrfs_path *path;
1003 path = btrfs_alloc_path();
1007 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
1010 } else if (ret == 1) {
1015 if (key->type == BTRFS_INODE_EXTREF_KEY)
1016 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1021 ret = !!btrfs_find_name_in_backref(path->nodes[0],
1025 btrfs_free_path(path);
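
/*
 * Illustrative sketch (hypothetical wrapper): for old style refs the
 * search key handed to backref_in_log() is built from the inode and
 * parent objectids, as __add_inode_ref() below does.
 */
static inline int example_name_in_log(struct btrfs_root *log,
				      u64 inode_objectid, u64 parent_objectid,
				      const char *name, int namelen)
{
	struct btrfs_key key;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = parent_objectid;
	/* returns 1 if the name is in the log, 0 if not, < 0 on error */
	return backref_in_log(log, &key, parent_objectid, name, namelen);
}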
1029 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
1030 struct btrfs_root *root,
1031 struct btrfs_path *path,
1032 struct btrfs_root *log_root,
1033 struct btrfs_inode *dir,
1034 struct btrfs_inode *inode,
1035 u64 inode_objectid, u64 parent_objectid,
1036 u64 ref_index, char *name, int namelen,
1041 int victim_name_len;
1042 struct extent_buffer *leaf;
1043 struct btrfs_dir_item *di;
1044 struct btrfs_key search_key;
1045 struct btrfs_inode_extref *extref;
1048 /* Search old style refs */
1049 search_key.objectid = inode_objectid;
1050 search_key.type = BTRFS_INODE_REF_KEY;
1051 search_key.offset = parent_objectid;
1052 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1054 struct btrfs_inode_ref *victim_ref;
1056 unsigned long ptr_end;
1058 leaf = path->nodes[0];
1060 /* are we trying to overwrite a back ref for the root directory?
1061 * if so, just jump out, we're done
1063 if (search_key.objectid == search_key.offset)
1066 /* check all the names in this back reference to see
1067 * if they are in the log. if so, we allow them to stay;
1068 * otherwise they must be unlinked as a conflict
1070 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1071 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1072 while (ptr < ptr_end) {
1073 victim_ref = (struct btrfs_inode_ref *)ptr;
1074 victim_name_len = btrfs_inode_ref_name_len(leaf,
1076 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1080 read_extent_buffer(leaf, victim_name,
1081 (unsigned long)(victim_ref + 1),
1084 ret = backref_in_log(log_root, &search_key,
1085 parent_objectid, victim_name,
1091 inc_nlink(&inode->vfs_inode);
1092 btrfs_release_path(path);
1094 ret = btrfs_unlink_inode(trans, root, dir, inode,
1095 victim_name, victim_name_len);
1099 ret = btrfs_run_delayed_items(trans);
1107 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1111 * NOTE: we have already searched the root tree and checked the
1112 * corresponding ref, so it does not need to be checked again.
1116 btrfs_release_path(path);
1118 /* Same search but for extended refs */
1119 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1120 inode_objectid, parent_objectid, 0,
1122 if (IS_ERR(extref)) {
1123 return PTR_ERR(extref);
1124 } else if (extref) {
1128 struct inode *victim_parent;
1130 leaf = path->nodes[0];
1132 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1133 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1135 while (cur_offset < item_size) {
1136 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1138 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1140 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1143 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1146 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1149 search_key.objectid = inode_objectid;
1150 search_key.type = BTRFS_INODE_EXTREF_KEY;
1151 search_key.offset = btrfs_extref_hash(parent_objectid,
1154 ret = backref_in_log(log_root, &search_key,
1155 parent_objectid, victim_name,
1162 victim_parent = read_one_inode(root,
1164 if (victim_parent) {
1165 inc_nlink(&inode->vfs_inode);
1166 btrfs_release_path(path);
1168 ret = btrfs_unlink_inode(trans, root,
1169 BTRFS_I(victim_parent),
1174 ret = btrfs_run_delayed_items(
1177 iput(victim_parent);
1186 cur_offset += victim_name_len + sizeof(*extref);
1190 btrfs_release_path(path);
1192 /* look for a conflicting sequence number */
1193 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1194 ref_index, name, namelen, 0);
1198 ret = drop_one_dir_item(trans, root, path, dir, di);
1202 btrfs_release_path(path);
1204 /* look for a conflicting name */
1205 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1210 ret = drop_one_dir_item(trans, root, path, dir, di);
1214 btrfs_release_path(path);
1219 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1220 u32 *namelen, char **name, u64 *index,
1221 u64 *parent_objectid)
1223 struct btrfs_inode_extref *extref;
1225 extref = (struct btrfs_inode_extref *)ref_ptr;
1227 *namelen = btrfs_inode_extref_name_len(eb, extref);
1228 *name = kmalloc(*namelen, GFP_NOFS);
1232 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1236 *index = btrfs_inode_extref_index(eb, extref);
1237 if (parent_objectid)
1238 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1243 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1244 u32 *namelen, char **name, u64 *index)
1246 struct btrfs_inode_ref *ref;
1248 ref = (struct btrfs_inode_ref *)ref_ptr;
1250 *namelen = btrfs_inode_ref_name_len(eb, ref);
1251 *name = kmalloc(*namelen, GFP_NOFS);
1255 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1258 *index = btrfs_inode_ref_index(eb, ref);
1264 * Take an inode reference item from the log tree and iterate all names from the
1265 * inode reference item in the subvolume tree with the same key (if it exists).
1266 * For any name that is not in the inode reference item from the log tree, do a
1267 * proper unlink of that name (that is, remove its entry from the inode
1268 * reference item and both dir index keys).
1270 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1271 struct btrfs_root *root,
1272 struct btrfs_path *path,
1273 struct btrfs_inode *inode,
1274 struct extent_buffer *log_eb,
1276 struct btrfs_key *key)
1279 unsigned long ref_ptr;
1280 unsigned long ref_end;
1281 struct extent_buffer *eb;
1284 btrfs_release_path(path);
1285 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1293 eb = path->nodes[0];
1294 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1295 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1296 while (ref_ptr < ref_end) {
1301 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1302 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1305 parent_id = key->offset;
1306 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1312 if (key->type == BTRFS_INODE_EXTREF_KEY)
1313 ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
1317 ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
1323 btrfs_release_path(path);
1324 dir = read_one_inode(root, parent_id);
1330 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1331 inode, name, namelen);
1335 * Whenever we need to check if a name exists or not, we
1336 * check the subvolume tree. So after an unlink we must
1337 * run delayed items, so that future checks for a name
1338 * during log replay see that the name does not exist anymore.
1342 ret = btrfs_run_delayed_items(trans);
1350 if (key->type == BTRFS_INODE_EXTREF_KEY)
1351 ref_ptr += sizeof(struct btrfs_inode_extref);
1353 ref_ptr += sizeof(struct btrfs_inode_ref);
1357 btrfs_release_path(path);
1361 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1362 const u8 ref_type, const char *name,
1365 struct btrfs_key key;
1366 struct btrfs_path *path;
1367 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1370 path = btrfs_alloc_path();
1374 key.objectid = btrfs_ino(BTRFS_I(inode));
1375 key.type = ref_type;
1376 if (key.type == BTRFS_INODE_REF_KEY)
1377 key.offset = parent_id;
1379 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1381 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1388 if (key.type == BTRFS_INODE_EXTREF_KEY)
1389 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1390 path->slots[0], parent_id, name, namelen);
1392 ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1396 btrfs_free_path(path);
1400 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1401 struct inode *dir, struct inode *inode, const char *name,
1402 int namelen, u64 ref_index)
1404 struct btrfs_dir_item *dir_item;
1405 struct btrfs_key key;
1406 struct btrfs_path *path;
1407 struct inode *other_inode = NULL;
1410 path = btrfs_alloc_path();
1414 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1415 btrfs_ino(BTRFS_I(dir)),
1418 btrfs_release_path(path);
1420 } else if (IS_ERR(dir_item)) {
1421 ret = PTR_ERR(dir_item);
1426 * Our inode's dentry collides with the dentry of another inode which is
1427 * in the log but not yet processed since it has a higher inode number.
1428 * So delete that other dentry.
1430 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1431 btrfs_release_path(path);
1432 other_inode = read_one_inode(root, key.objectid);
1437 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
1442 * If we dropped the link count to 0, bump it so that later the iput()
1443 * on the inode will not free it. We will fixup the link count later.
1445 if (other_inode->i_nlink == 0)
1446 inc_nlink(other_inode);
1448 ret = btrfs_run_delayed_items(trans);
1452 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1453 name, namelen, 0, ref_index);
1456 btrfs_free_path(path);
1462 * replay one inode back reference item found in the log tree.
1463 * eb, slot and key refer to the buffer and key found in the log tree.
1464 * root is the destination we are replaying into, and path is for temp
1465 * use by this function. (it should be released on return).
1467 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1468 struct btrfs_root *root,
1469 struct btrfs_root *log,
1470 struct btrfs_path *path,
1471 struct extent_buffer *eb, int slot,
1472 struct btrfs_key *key)
1474 struct inode *dir = NULL;
1475 struct inode *inode = NULL;
1476 unsigned long ref_ptr;
1477 unsigned long ref_end;
1481 int search_done = 0;
1482 int log_ref_ver = 0;
1483 u64 parent_objectid;
1486 int ref_struct_size;
1488 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1489 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1491 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1492 struct btrfs_inode_extref *r;
1494 ref_struct_size = sizeof(struct btrfs_inode_extref);
1496 r = (struct btrfs_inode_extref *)ref_ptr;
1497 parent_objectid = btrfs_inode_extref_parent(eb, r);
1499 ref_struct_size = sizeof(struct btrfs_inode_ref);
1500 parent_objectid = key->offset;
1502 inode_objectid = key->objectid;
1505 * it is possible that we didn't log all the parent directories
1506 * for a given inode. If we don't find the dir, just don't
1507 * copy the back ref in. The link count fixup code will take care of the rest.
1510 dir = read_one_inode(root, parent_objectid);
1516 inode = read_one_inode(root, inode_objectid);
1522 while (ref_ptr < ref_end) {
1524 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1525 &ref_index, &parent_objectid);
1527 * parent object can change from one array index to another in the same ref item.
1531 dir = read_one_inode(root, parent_objectid);
1537 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1543 ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1544 btrfs_ino(BTRFS_I(inode)), ref_index,
1548 } else if (ret == 0) {
1550 * look for a conflicting back reference in the
1551 * metadata. if we find one we have to unlink that name
1552 * of the file before we add our new link. Later on, we
1553 * overwrite any existing back reference, and we don't
1554 * want to create dangling pointers in the directory.
1558 ret = __add_inode_ref(trans, root, path, log,
1563 ref_index, name, namelen,
1573 * If a reference item already exists for this inode
1574 * with the same parent and name, but different index,
1575 * drop it and the corresponding directory index entries
1576 * from the parent before adding the new reference item
1577 * and dir index entries, otherwise we would fail with
1578 * -EEXIST returned from btrfs_add_link() below.
1580 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1583 ret = btrfs_unlink_inode(trans, root,
1588 * If we dropped the link count to 0, bump it so
1589 * that later the iput() on the inode will not
1590 * free it. We will fixup the link count later.
1592 if (!ret && inode->i_nlink == 0)
1595 * Whenever we need to check if a name exists or
1596 * not, we check the subvolume tree. So after an
1597 * unlink we must run delayed items, so that future
1598 * checks for a name during log replay see that the
1599 * name does not exist anymore.
1602 ret = btrfs_run_delayed_items(trans);
1607 /* insert our name */
1608 ret = add_link(trans, root, dir, inode, name, namelen,
1613 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1617 /* Else, ret == 1, we already have a perfect match, we're done. */
1619 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1629 * Before we overwrite the inode reference item in the subvolume tree
1630 * with the item from the log tree, we must unlink all names from the
1631 * parent directory that are in the subvolume's tree inode reference
1632 * item, otherwise we end up with an inconsistent subvolume tree where
1633 * dir index entries exist for a name but there is no inode reference
1634 * item with the same name.
1636 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1641 /* finally write the back reference in the inode */
1642 ret = overwrite_item(trans, root, path, eb, slot, key);
1644 btrfs_release_path(path);
1651 static int count_inode_extrefs(struct btrfs_root *root,
1652 struct btrfs_inode *inode, struct btrfs_path *path)
1656 unsigned int nlink = 0;
1659 u64 inode_objectid = btrfs_ino(inode);
1662 struct btrfs_inode_extref *extref;
1663 struct extent_buffer *leaf;
1666 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1671 leaf = path->nodes[0];
1672 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1673 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1676 while (cur_offset < item_size) {
1677 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1678 name_len = btrfs_inode_extref_name_len(leaf, extref);
1682 cur_offset += name_len + sizeof(*extref);
1686 btrfs_release_path(path);
1688 btrfs_release_path(path);
1690 if (ret < 0 && ret != -ENOENT)
1695 static int count_inode_refs(struct btrfs_root *root,
1696 struct btrfs_inode *inode, struct btrfs_path *path)
1699 struct btrfs_key key;
1700 unsigned int nlink = 0;
1702 unsigned long ptr_end;
1704 u64 ino = btrfs_ino(inode);
1707 key.type = BTRFS_INODE_REF_KEY;
1708 key.offset = (u64)-1;
1711 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1715 if (path->slots[0] == 0)
1720 btrfs_item_key_to_cpu(path->nodes[0], &key,
1722 if (key.objectid != ino ||
1723 key.type != BTRFS_INODE_REF_KEY)
1725 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1726 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1728 while (ptr < ptr_end) {
1729 struct btrfs_inode_ref *ref;
1731 ref = (struct btrfs_inode_ref *)ptr;
1732 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1734 ptr = (unsigned long)(ref + 1) + name_len;
1738 if (key.offset == 0)
1740 if (path->slots[0] > 0) {
1745 btrfs_release_path(path);
1747 btrfs_release_path(path);
1753 * There are a few corner cases where the link count of the file can't
1754 * be properly maintained during replay. So, instead of adding
1755 * lots of complexity to the log code, we just scan the backrefs
1756 * for any file that has been through replay.
1758 * The scan will update the link count on the inode to reflect the
1759 * number of back refs found. If it goes down to zero, the iput
1760 * will free the inode.
1762 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1763 struct btrfs_root *root,
1764 struct inode *inode)
1766 struct btrfs_path *path;
1769 u64 ino = btrfs_ino(BTRFS_I(inode));
1771 path = btrfs_alloc_path();
1775 ret = count_inode_refs(root, BTRFS_I(inode), path);
1781 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1789 if (nlink != inode->i_nlink) {
1790 set_nlink(inode, nlink);
1791 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1795 BTRFS_I(inode)->index_cnt = (u64)-1;
1797 if (inode->i_nlink == 0) {
1798 if (S_ISDIR(inode->i_mode)) {
1799 ret = replay_dir_deletes(trans, root, NULL, path,
1804 ret = btrfs_insert_orphan_item(trans, root, ino);
1810 btrfs_free_path(path);
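
/*
 * Illustrative sketch (simplified; real error propagation lives in
 * fixup_inode_link_count() above): the recovered link count is the sum
 * of classic and extended back references.
 */
static inline int example_count_all_links(struct btrfs_root *root,
					  struct btrfs_inode *inode,
					  struct btrfs_path *path)
{
	int refs = count_inode_refs(root, inode, path);
	int extrefs = count_inode_extrefs(root, inode, path);

	if (refs < 0)
		return refs;
	if (extrefs < 0)
		return extrefs;
	return refs + extrefs;
}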
1814 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1815 struct btrfs_root *root,
1816 struct btrfs_path *path)
1819 struct btrfs_key key;
1820 struct inode *inode;
1822 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1823 key.type = BTRFS_ORPHAN_ITEM_KEY;
1824 key.offset = (u64)-1;
1826 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1832 if (path->slots[0] == 0)
1837 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1838 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1839 key.type != BTRFS_ORPHAN_ITEM_KEY)
1842 ret = btrfs_del_item(trans, root, path);
1846 btrfs_release_path(path);
1847 inode = read_one_inode(root, key.offset);
1853 ret = fixup_inode_link_count(trans, root, inode);
1859 * fixup on a directory may create new entries,
1860 * make sure we always look for the highest possible offset
1863 key.offset = (u64)-1;
1865 btrfs_release_path(path);
1871 * record a given inode in the fixup dir so we can check its link
1872 * count when replay is done. The link count is incremented here
1873 * so the inode won't go away until we check it
1875 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1876 struct btrfs_root *root,
1877 struct btrfs_path *path,
1880 struct btrfs_key key;
1882 struct inode *inode;
1884 inode = read_one_inode(root, objectid);
1888 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1889 key.type = BTRFS_ORPHAN_ITEM_KEY;
1890 key.offset = objectid;
1892 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1894 btrfs_release_path(path);
1896 if (!inode->i_nlink)
1897 set_nlink(inode, 1);
1900 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1901 } else if (ret == -EEXIST) {
1910 * when replaying the log for a directory, we only insert names
1911 * for inodes that actually exist. This means an fsync on a directory
1912 * does not implicitly fsync all the new files in it
1914 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1915 struct btrfs_root *root,
1916 u64 dirid, u64 index,
1917 char *name, int name_len,
1918 struct btrfs_key *location)
1920 struct inode *inode;
1924 inode = read_one_inode(root, location->objectid);
1928 dir = read_one_inode(root, dirid);
1934 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1935 name_len, 1, index);
1937 /* FIXME, put inode into FIXUP list */
1945 * take a single entry in a log directory item and replay it into
1948 * if a conflicting item exists in the subdirectory already,
1949 * the inode it points to is unlinked and put into the link count fixup tree.
1952 * If a name from the log points to a file or directory that does
1953 * not exist in the FS, it is skipped. fsyncs on directories
1954 * do not force down inodes inside that directory, just changes to the
1955 * names or unlinks in a directory.
1957 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1958 * non-existing inode) and 1 if the name was replayed.
1960 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1961 struct btrfs_root *root,
1962 struct btrfs_path *path,
1963 struct extent_buffer *eb,
1964 struct btrfs_dir_item *di,
1965 struct btrfs_key *key)
1969 struct btrfs_dir_item *dst_di;
1970 struct btrfs_key found_key;
1971 struct btrfs_key log_key;
1976 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1977 bool name_added = false;
1979 dir = read_one_inode(root, key->objectid);
1983 name_len = btrfs_dir_name_len(eb, di);
1984 name = kmalloc(name_len, GFP_NOFS);
1990 log_type = btrfs_dir_type(eb, di);
1991 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1994 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1995 ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1996 btrfs_release_path(path);
1999 exists = (ret == 0);
2002 if (key->type == BTRFS_DIR_ITEM_KEY) {
2003 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
2005 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
2006 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
2016 if (IS_ERR(dst_di)) {
2017 ret = PTR_ERR(dst_di);
2019 } else if (!dst_di) {
2020 /* we need a sequence number to insert, so we only
2021 * do inserts for the BTRFS_DIR_INDEX_KEY types
2023 if (key->type != BTRFS_DIR_INDEX_KEY)
2028 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
2029 /* the existing item matches the logged item */
2030 if (found_key.objectid == log_key.objectid &&
2031 found_key.type == log_key.type &&
2032 found_key.offset == log_key.offset &&
2033 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
2034 update_size = false;
2039 * don't drop the conflicting directory entry if the inode
2040 * for the new entry doesn't exist
2045 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
2049 if (key->type == BTRFS_DIR_INDEX_KEY)
2052 btrfs_release_path(path);
2053 if (!ret && update_size) {
2054 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2055 ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
2059 if (!ret && name_added)
2065 * Check if the inode reference exists in the log for the given name,
2066 * inode and parent inode
2068 found_key.objectid = log_key.objectid;
2069 found_key.type = BTRFS_INODE_REF_KEY;
2070 found_key.offset = key->objectid;
2071 ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
2075 /* The dentry will be added later. */
2077 update_size = false;
2081 found_key.objectid = log_key.objectid;
2082 found_key.type = BTRFS_INODE_EXTREF_KEY;
2083 found_key.offset = key->objectid;
2084 ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
2089 /* The dentry will be added later. */
2091 update_size = false;
2094 btrfs_release_path(path);
2095 ret = insert_one_name(trans, root, key->objectid, key->offset,
2096 name, name_len, &log_key);
2097 if (ret && ret != -ENOENT && ret != -EEXIST)
2101 update_size = false;
2107 * find all the names in a directory item and reconcile them into
2108 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2109 * one name in a directory item, but the same code gets used for
2110 * both directory index types
2112 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2113 struct btrfs_root *root,
2114 struct btrfs_path *path,
2115 struct extent_buffer *eb, int slot,
2116 struct btrfs_key *key)
2119 u32 item_size = btrfs_item_size_nr(eb, slot);
2120 struct btrfs_dir_item *di;
2123 unsigned long ptr_end;
2124 struct btrfs_path *fixup_path = NULL;
2126 ptr = btrfs_item_ptr_offset(eb, slot);
2127 ptr_end = ptr + item_size;
2128 while (ptr < ptr_end) {
2129 di = (struct btrfs_dir_item *)ptr;
2130 name_len = btrfs_dir_name_len(eb, di);
2131 ret = replay_one_name(trans, root, path, eb, di, key);
2134 ptr = (unsigned long)(di + 1);
2138 * If this entry refers to a non-directory (directories can not
2139 * have a link count > 1) and it was added in the transaction
2140 * that was not committed, make sure we fixup the link count of
2141 * the inode the entry points to. Otherwise something like
2142 * the following would result in a directory pointing to an
2143 * inode with a wrong link count that does not account for this dir entry:
2151 * ln testdir/bar testdir/bar_link
2152 * ln testdir/foo testdir/foo_link
2153 * xfs_io -c "fsync" testdir/bar
2157 * mount fs, log replay happens
2159 * File foo would remain with a link count of 1 when it has two
2160 * entries pointing to it in the directory testdir. This would
2161 * make it impossible to ever delete the parent directory, as
2162 * it would result in stale dentries that can never be deleted.
2164 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2165 struct btrfs_key di_key;
2168 fixup_path = btrfs_alloc_path();
2175 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2176 ret = link_to_fixup_dir(trans, root, fixup_path,
2183 btrfs_free_path(fixup_path);
2188 * directory replay has two parts. There are the standard directory
2189 * items in the log copied from the subvolume, and range items
2190 * created in the log while the subvolume was logged.
2192 * The range items tell us which parts of the key space the log
2193 * is authoritative for. During replay, if a key in the subvolume
2194 * directory is in a logged range item, but not actually in the log,
2195 * that means it was deleted from the directory before the fsync
2196 * and should be removed.
2198 static noinline int find_dir_range(struct btrfs_root *root,
2199 struct btrfs_path *path,
2200 u64 dirid, int key_type,
2201 u64 *start_ret, u64 *end_ret)
2203 struct btrfs_key key;
2205 struct btrfs_dir_log_item *item;
2209 if (*start_ret == (u64)-1)
2212 key.objectid = dirid;
2213 key.type = key_type;
2214 key.offset = *start_ret;
2216 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2220 if (path->slots[0] == 0)
2225 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2227 if (key.type != key_type || key.objectid != dirid) {
2231 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2232 struct btrfs_dir_log_item);
2233 found_end = btrfs_dir_log_end(path->nodes[0], item);
2235 if (*start_ret >= key.offset && *start_ret <= found_end) {
2237 *start_ret = key.offset;
2238 *end_ret = found_end;
2243 /* check the next slot in the tree to see if it is a valid item */
2244 nritems = btrfs_header_nritems(path->nodes[0]);
2246 if (path->slots[0] >= nritems) {
2247 ret = btrfs_next_leaf(root, path);
2252 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2254 if (key.type != key_type || key.objectid != dirid) {
2258 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2259 struct btrfs_dir_log_item);
2260 found_end = btrfs_dir_log_end(path->nodes[0], item);
2261 *start_ret = key.offset;
2262 *end_ret = found_end;
2265 btrfs_release_path(path);
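
/*
 * Illustrative sketch (loop shape taken from replay_dir_deletes()
 * further below): callers consume the authoritative ranges one span at
 * a time until find_dir_range() runs out of range items.
 */
static inline void example_walk_dir_log_ranges(struct btrfs_root *log,
					       struct btrfs_path *path,
					       u64 dirid)
{
	u64 range_start = 0;
	u64 range_end = 0;

	while (range_start != (u64)-1) {
		if (find_dir_range(log, path, dirid, BTRFS_DIR_LOG_ITEM_KEY,
				   &range_start, &range_end))
			break;
		/* ... remove subvolume dir entries in [range_start,
		 *     range_end] that are absent from the log ... */
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}
}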
2270 * this looks for a given directory item in the log. If the directory
2271 * item is not in the log, the item is removed and the inode it points to is unlinked.
2274 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2275 struct btrfs_root *root,
2276 struct btrfs_root *log,
2277 struct btrfs_path *path,
2278 struct btrfs_path *log_path,
2280 struct btrfs_key *dir_key)
2283 struct extent_buffer *eb;
2286 struct btrfs_dir_item *di;
2287 struct btrfs_dir_item *log_di;
2290 unsigned long ptr_end;
2292 struct inode *inode;
2293 struct btrfs_key location;
2296 eb = path->nodes[0];
2297 slot = path->slots[0];
2298 item_size = btrfs_item_size_nr(eb, slot);
2299 ptr = btrfs_item_ptr_offset(eb, slot);
2300 ptr_end = ptr + item_size;
2301 while (ptr < ptr_end) {
2302 di = (struct btrfs_dir_item *)ptr;
2303 name_len = btrfs_dir_name_len(eb, di);
2304 name = kmalloc(name_len, GFP_NOFS);
2309 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2312 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2313 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2316 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2317 log_di = btrfs_lookup_dir_index_item(trans, log,
2324 btrfs_dir_item_key_to_cpu(eb, di, &location);
2325 btrfs_release_path(path);
2326 btrfs_release_path(log_path);
2327 inode = read_one_inode(root, location.objectid);
2333 ret = link_to_fixup_dir(trans, root,
2334 path, location.objectid);
2342 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2343 BTRFS_I(inode), name, name_len);
2345 ret = btrfs_run_delayed_items(trans);
2351 /* there might still be more names under this key;
2352 * check and repeat if required
2354 ret = btrfs_search_slot(NULL, root, dir_key, path,
2360 } else if (IS_ERR(log_di)) {
2362 return PTR_ERR(log_di);
2364 btrfs_release_path(log_path);
2367 ptr = (unsigned long)(di + 1);
2372 btrfs_release_path(path);
2373 btrfs_release_path(log_path);
2377 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2378 struct btrfs_root *root,
2379 struct btrfs_root *log,
2380 struct btrfs_path *path,
2383 struct btrfs_key search_key;
2384 struct btrfs_path *log_path;
2389 log_path = btrfs_alloc_path();
2393 search_key.objectid = ino;
2394 search_key.type = BTRFS_XATTR_ITEM_KEY;
2395 search_key.offset = 0;
2397 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2401 nritems = btrfs_header_nritems(path->nodes[0]);
2402 for (i = path->slots[0]; i < nritems; i++) {
2403 struct btrfs_key key;
2404 struct btrfs_dir_item *di;
2405 struct btrfs_dir_item *log_di;
2409 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2410 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2415 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2416 total_size = btrfs_item_size_nr(path->nodes[0], i);
2418 while (cur < total_size) {
2419 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2420 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2421 u32 this_len = sizeof(*di) + name_len + data_len;
2424 name = kmalloc(name_len, GFP_NOFS);
2429 read_extent_buffer(path->nodes[0], name,
2430 (unsigned long)(di + 1), name_len);
2432 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2434 btrfs_release_path(log_path);
2436 /* Doesn't exist in log tree, so delete it. */
2437 btrfs_release_path(path);
2438 di = btrfs_lookup_xattr(trans, root, path, ino,
2439 name, name_len, -1);
2446 ret = btrfs_delete_one_dir_name(trans, root,
2450 btrfs_release_path(path);
2455 if (IS_ERR(log_di)) {
2456 ret = PTR_ERR(log_di);
2460 di = (struct btrfs_dir_item *)((char *)di + this_len);
2463 ret = btrfs_next_leaf(root, path);
2469 btrfs_free_path(log_path);
2470 btrfs_release_path(path);
2476 * deletion replay happens before we copy any new directory items
2477 * out of the log or out of backreferences from inodes. It
2478 * scans the log to find ranges of keys that the log is authoritative for,
2479 * and then scans the directory to find items in those ranges that are
2480 * not present in the log.
2482 * Anything we don't find in the log is unlinked and removed from the directory.
2485 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2486 struct btrfs_root *root,
2487 struct btrfs_root *log,
2488 struct btrfs_path *path,
2489 u64 dirid, int del_all)
2493 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2495 struct btrfs_key dir_key;
2496 struct btrfs_key found_key;
2497 struct btrfs_path *log_path;
2500 dir_key.objectid = dirid;
2501 dir_key.type = BTRFS_DIR_ITEM_KEY;
2502 log_path = btrfs_alloc_path();
2506 dir = read_one_inode(root, dirid);
2507 /* it isn't an error if the inode isn't there, that can happen
2508 * because we replay the deletes before we copy in the inode item
2512 btrfs_free_path(log_path);
2520 range_end = (u64)-1;
2522 ret = find_dir_range(log, path, dirid, key_type,
2523 &range_start, &range_end);
2530 dir_key.offset = range_start;
2533 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2538 nritems = btrfs_header_nritems(path->nodes[0]);
2539 if (path->slots[0] >= nritems) {
2540 ret = btrfs_next_leaf(root, path);
2546 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2548 if (found_key.objectid != dirid ||
2549 found_key.type != dir_key.type)
2552 if (found_key.offset > range_end)
2555 ret = check_item_in_log(trans, root, log, path,
2560 if (found_key.offset == (u64)-1)
2562 dir_key.offset = found_key.offset + 1;
2564 btrfs_release_path(path);
2565 if (range_end == (u64)-1)
2567 range_start = range_end + 1;
2572 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2573 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2574 dir_key.type = BTRFS_DIR_INDEX_KEY;
2575 btrfs_release_path(path);
2579 btrfs_release_path(path);
2580 btrfs_free_path(log_path);
2586 * the process_func used to replay items from the log tree. This
2587 * gets called in two different stages. The first stage just looks
2588 * for inodes and makes sure they are all copied into the subvolume.
2590 * The second stage copies all the other item types from the log into
2591 * the subvolume. The two stage approach is slower, but gets rid of
2592 * lots of complexity around inodes referencing other inodes that exist
2593 * only in the log (references come from either directory items or inode back refs).
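* A rough sketch of the resulting dispatch, assuming the walk_control
* stages declared near the top of this file (LOG_WALK_REPLAY_INODES,
* then LOG_WALK_REPLAY_DIR_INDEX, then LOG_WALK_REPLAY_ALL):
*
*	if (key.type == BTRFS_INODE_ITEM_KEY &&
*	    wc->stage == LOG_WALK_REPLAY_INODES)
*		recreate the inode in the subvolume;
*	else if (key.type == BTRFS_DIR_INDEX_KEY &&
*		 wc->stage == LOG_WALK_REPLAY_DIR_INDEX)
*		replay_one_dir_item(...);
*	else if (wc->stage == LOG_WALK_REPLAY_ALL)
*		copy the remaining item types (xattrs, inode refs,
*		extents and dir items);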
2596 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2597 struct walk_control *wc, u64 gen, int level)
2600 struct btrfs_path *path;
2601 struct btrfs_root *root = wc->replay_dest;
2602 struct btrfs_key key;
2606 ret = btrfs_read_buffer(eb, gen, level, NULL);
2610 level = btrfs_header_level(eb);
2615 path = btrfs_alloc_path();
2619 nritems = btrfs_header_nritems(eb);
2620 for (i = 0; i < nritems; i++) {
2621 btrfs_item_key_to_cpu(eb, &key, i);
2623 /* inode keys are done during the first stage */
2624 if (key.type == BTRFS_INODE_ITEM_KEY &&
2625 wc->stage == LOG_WALK_REPLAY_INODES) {
2626 struct btrfs_inode_item *inode_item;
2629 inode_item = btrfs_item_ptr(eb, i,
2630 struct btrfs_inode_item);
2632 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2633 * and never got linked before the fsync, skip it, as
2634 * replaying it is pointless since it would be deleted
2635 * later. We skip logging tmpfiles, but it's always
2636 * possible we are replaying a log created with a kernel
2637 * that used to log tmpfiles.
2639 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2640 wc->ignore_cur_inode = true;
2643 wc->ignore_cur_inode = false;
2645 ret = replay_xattr_deletes(wc->trans, root, log,
2646 path, key.objectid);
2649 mode = btrfs_inode_mode(eb, inode_item);
2650 if (S_ISDIR(mode)) {
2651 ret = replay_dir_deletes(wc->trans,
2652 root, log, path, key.objectid, 0);
2656 ret = overwrite_item(wc->trans, root, path,
2662 * Before replaying extents, truncate the inode to its
2663 * size. We need to do it now and not after log replay
2664 * because before an fsync we can have prealloc extents
2665 * added beyond the inode's i_size. If we did it after,
2666 * through orphan cleanup for example, we would drop
2667 * those prealloc extents just after replaying them.
2669 if (S_ISREG(mode)) {
2670 struct btrfs_drop_extents_args drop_args = { 0 };
2671 struct inode *inode;
2674 inode = read_one_inode(root, key.objectid);
2679 from = ALIGN(i_size_read(inode),
2680 root->fs_info->sectorsize);
2681 drop_args.start = from;
2682 drop_args.end = (u64)-1;
2683 drop_args.drop_cache = true;
2684 ret = btrfs_drop_extents(wc->trans, root,
2688 inode_sub_bytes(inode,
2689 drop_args.bytes_found);
2690 /* Update the inode's nbytes. */
2691 ret = btrfs_update_inode(wc->trans,
2692 root, BTRFS_I(inode));
2699 ret = link_to_fixup_dir(wc->trans, root,
2700 path, key.objectid);
2705 if (wc->ignore_cur_inode)
2708 if (key.type == BTRFS_DIR_INDEX_KEY &&
2709 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2710 ret = replay_one_dir_item(wc->trans, root, path,
2716 if (wc->stage < LOG_WALK_REPLAY_ALL)
2719 /* these keys are simply copied */
2720 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2721 ret = overwrite_item(wc->trans, root, path,
2725 } else if (key.type == BTRFS_INODE_REF_KEY ||
2726 key.type == BTRFS_INODE_EXTREF_KEY) {
2727 ret = add_inode_ref(wc->trans, root, log, path,
2729 if (ret && ret != -ENOENT)
2732 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2733 ret = replay_one_extent(wc->trans, root, path,
2737 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2738 ret = replay_one_dir_item(wc->trans, root, path,
2744 btrfs_free_path(path);
2749 * Correctly adjust the reserved bytes occupied by a log tree extent buffer
2751 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2753 struct btrfs_block_group *cache;
2755 cache = btrfs_lookup_block_group(fs_info, start);
2757 btrfs_err(fs_info, "unable to find block group for %llu", start);
2761 spin_lock(&cache->space_info->lock);
2762 spin_lock(&cache->lock);
2763 cache->reserved -= fs_info->nodesize;
2764 cache->space_info->bytes_reserved -= fs_info->nodesize;
2765 spin_unlock(&cache->lock);
2766 spin_unlock(&cache->space_info->lock);
2768 btrfs_put_block_group(cache);
2771 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2772 struct btrfs_root *root,
2773 struct btrfs_path *path, int *level,
2774 struct walk_control *wc)
2776 struct btrfs_fs_info *fs_info = root->fs_info;
2779 struct extent_buffer *next;
2780 struct extent_buffer *cur;
2784 while (*level > 0) {
2785 struct btrfs_key first_key;
2787 cur = path->nodes[*level];
2789 WARN_ON(btrfs_header_level(cur) != *level);
2791 if (path->slots[*level] >=
2792 btrfs_header_nritems(cur))
2795 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2796 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2797 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2798 blocksize = fs_info->nodesize;
2800 next = btrfs_find_create_tree_block(fs_info, bytenr,
2801 btrfs_header_owner(cur),
2804 return PTR_ERR(next);
2807 ret = wc->process_func(root, next, wc, ptr_gen,
2810 free_extent_buffer(next);
2814 path->slots[*level]++;
2816 ret = btrfs_read_buffer(next, ptr_gen,
2817 *level - 1, &first_key);
2819 free_extent_buffer(next);
2824 btrfs_tree_lock(next);
2825 btrfs_clean_tree_block(next);
2826 btrfs_wait_tree_block_writeback(next);
2827 btrfs_tree_unlock(next);
2828 ret = btrfs_pin_reserved_extent(trans,
2831 free_extent_buffer(next);
2834 btrfs_redirty_list_add(
2835 trans->transaction, next);
2837 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2838 clear_extent_buffer_dirty(next);
2839 unaccount_log_buffer(fs_info, bytenr);
2842 free_extent_buffer(next);
2845 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2847 free_extent_buffer(next);
2851 if (path->nodes[*level-1])
2852 free_extent_buffer(path->nodes[*level-1]);
2853 path->nodes[*level-1] = next;
2854 *level = btrfs_header_level(next);
2855 path->slots[*level] = 0;
2858 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2864 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2865 struct btrfs_root *root,
2866 struct btrfs_path *path, int *level,
2867 struct walk_control *wc)
2869 struct btrfs_fs_info *fs_info = root->fs_info;
2874 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2875 slot = path->slots[i];
2876 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2879 WARN_ON(*level == 0);
2882 ret = wc->process_func(root, path->nodes[*level], wc,
2883 btrfs_header_generation(path->nodes[*level]),
2889 struct extent_buffer *next;
2891 next = path->nodes[*level];
2894 btrfs_tree_lock(next);
2895 btrfs_clean_tree_block(next);
2896 btrfs_wait_tree_block_writeback(next);
2897 btrfs_tree_unlock(next);
2898 ret = btrfs_pin_reserved_extent(trans,
2899 path->nodes[*level]->start,
2900 path->nodes[*level]->len);
2903 btrfs_redirty_list_add(trans->transaction,
2906 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2907 clear_extent_buffer_dirty(next);
2909 unaccount_log_buffer(fs_info,
2910 path->nodes[*level]->start);
2913 free_extent_buffer(path->nodes[*level]);
2914 path->nodes[*level] = NULL;
2922 * drop the reference count on the tree rooted at 'snap'. This traverses
2923 * the tree freeing any blocks that have a ref count of zero after being decremented.
2926 static int walk_log_tree(struct btrfs_trans_handle *trans,
2927 struct btrfs_root *log, struct walk_control *wc)
2929 struct btrfs_fs_info *fs_info = log->fs_info;
2933 struct btrfs_path *path;
2936 path = btrfs_alloc_path();
2940 level = btrfs_header_level(log->node);
2942 path->nodes[level] = log->node;
2943 atomic_inc(&log->node->refs);
2944 path->slots[level] = 0;
2947 wret = walk_down_log_tree(trans, log, path, &level, wc);
2955 wret = walk_up_log_tree(trans, log, path, &level, wc);
2964 /* was the root node processed? if not, catch it here */
2965 if (path->nodes[orig_level]) {
2966 ret = wc->process_func(log, path->nodes[orig_level], wc,
2967 btrfs_header_generation(path->nodes[orig_level]),
2972 struct extent_buffer *next;
2974 next = path->nodes[orig_level];
2977 btrfs_tree_lock(next);
2978 btrfs_clean_tree_block(next);
2979 btrfs_wait_tree_block_writeback(next);
2980 btrfs_tree_unlock(next);
2981 ret = btrfs_pin_reserved_extent(trans,
2982 next->start, next->len);
2985 btrfs_redirty_list_add(trans->transaction, next);
2987 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2988 clear_extent_buffer_dirty(next);
2989 unaccount_log_buffer(fs_info, next->start);
2995 btrfs_free_path(path);
3000 * helper function to update the item for a given subvolume's log root
3001 * in the tree of log roots
3003 static int update_log_root(struct btrfs_trans_handle *trans,
3004 struct btrfs_root *log,
3005 struct btrfs_root_item *root_item)
3007 struct btrfs_fs_info *fs_info = log->fs_info;
3010 if (log->log_transid == 1) {
3011 /* insert root item on the first sync */
3012 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
3013 &log->root_key, root_item);
3015 ret = btrfs_update_root(trans, fs_info->log_root_tree,
3016 &log->root_key, root_item);
3021 static void wait_log_commit(struct btrfs_root *root, int transid)
3024 int index = transid % 2;
3027 * we only allow two pending log transactions at a time,
3028 * so we know that if ours is more than 2 older than the
3029 * current transaction, we're done
3032 prepare_to_wait(&root->log_commit_wait[index],
3033 &wait, TASK_UNINTERRUPTIBLE);
3035 if (!(root->log_transid_committed < transid &&
3036 atomic_read(&root->log_commit[index])))
3039 mutex_unlock(&root->log_mutex);
3041 mutex_lock(&root->log_mutex);
3043 finish_wait(&root->log_commit_wait[index], &wait);
3046 static void wait_for_writer(struct btrfs_root *root)
3051 prepare_to_wait(&root->log_writer_wait, &wait,
3052 TASK_UNINTERRUPTIBLE);
3053 if (!atomic_read(&root->log_writers))
3056 mutex_unlock(&root->log_mutex);
3058 mutex_lock(&root->log_mutex);
3060 finish_wait(&root->log_writer_wait, &wait);
3063 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3064 struct btrfs_log_ctx *ctx)
3069 mutex_lock(&root->log_mutex);
3070 list_del_init(&ctx->list);
3071 mutex_unlock(&root->log_mutex);
3075 * Invoked in log mutex context, or the caller must otherwise make
3076 * sure that no other task can access the list.
3078 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3079 int index, int error)
3081 struct btrfs_log_ctx *ctx;
3082 struct btrfs_log_ctx *safe;
3084 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3085 list_del_init(&ctx->list);
3086 ctx->log_ret = error;
3091 * btrfs_sync_log sends a given tree log down to the disk and
3092 * updates the super blocks to record it. When this call is done,
3093 * you know that any inodes previously logged are safely on disk only if it returns 0.
3096 * Any other return value means you need to call btrfs_commit_transaction.
3097 * Some of the edge cases for fsyncing directories that have had unlinks
3098 * or renames done in the past mean that sometimes the only safe
3099 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3100 * that has happened.
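* A hedged sketch of how a caller (e.g. an fsync implementation) might
* react to the return value; btrfs_sync_log() is real, the surrounding
* caller shape here is illustrative only:
*
*	ret = btrfs_sync_log(trans, root, ctx);
*	if (ret == 0)
*		ret = btrfs_end_transaction(trans);    // log safely on disk
*	else
*		ret = btrfs_commit_transaction(trans); // fall back to a full commit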
3102 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3103 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3109 struct btrfs_fs_info *fs_info = root->fs_info;
3110 struct btrfs_root *log = root->log_root;
3111 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3112 struct btrfs_root_item new_root_item;
3113 int log_transid = 0;
3114 struct btrfs_log_ctx root_log_ctx;
3115 struct blk_plug plug;
3119 mutex_lock(&root->log_mutex);
3120 log_transid = ctx->log_transid;
3121 if (root->log_transid_committed >= log_transid) {
3122 mutex_unlock(&root->log_mutex);
3123 return ctx->log_ret;
3126 index1 = log_transid % 2;
3127 if (atomic_read(&root->log_commit[index1])) {
3128 wait_log_commit(root, log_transid);
3129 mutex_unlock(&root->log_mutex);
3130 return ctx->log_ret;
3132 ASSERT(log_transid == root->log_transid);
3133 atomic_set(&root->log_commit[index1], 1);
3135 /* wait for previous tree log sync to complete */
3136 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3137 wait_log_commit(root, log_transid - 1);
3140 int batch = atomic_read(&root->log_batch);
3141 /* when we're on an ssd, just kick the log commit out */
3142 if (!btrfs_test_opt(fs_info, SSD) &&
3143 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3144 mutex_unlock(&root->log_mutex);
3145 schedule_timeout_uninterruptible(1);
3146 mutex_lock(&root->log_mutex);
3148 wait_for_writer(root);
3149 if (batch == atomic_read(&root->log_batch))
3153 /* bail out if we need to do a full commit */
3154 if (btrfs_need_log_full_commit(trans)) {
3156 mutex_unlock(&root->log_mutex);
3160 if (log_transid % 2 == 0)
3161 mark = EXTENT_DIRTY;
3165 /* we start IO on all the marked extents here, but we don't actually
3166 * wait for them until later.
3168 blk_start_plug(&plug);
3169 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3171 * -EAGAIN happens when someone, e.g., a concurrent transaction
3172 * commit, writes a dirty extent in this tree-log commit. This
3173 * concurrent write will create a hole writing out the extents,
3174 * and we cannot proceed on a zoned filesystem, requiring
3175 * sequential writing. While we could bail out to a full commit
3176 * here, we instead continue, hoping the concurrent writing fills the hole.
3179 if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
3182 blk_finish_plug(&plug);
3183 btrfs_abort_transaction(trans, ret);
3184 btrfs_set_log_full_commit(trans);
3185 mutex_unlock(&root->log_mutex);
3190 * We _must_ update under the root->log_mutex in order to make sure we
3191 * have a consistent view of the log root we are trying to commit at
3194 * We _must_ copy this into a local copy, because we are not holding the
3195 * log_root_tree->log_mutex yet. This is important because when we
3196 * commit the log_root_tree we must have a consistent view of the
3197 * log_root_tree when we update the super block to point at the
3198 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3199 * with the commit and possibly point at the new block which we may not have written out.
3202 btrfs_set_root_node(&log->root_item, log->node);
3203 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3205 root->log_transid++;
3206 log->log_transid = root->log_transid;
3207 root->log_start_pid = 0;
3209 * IO has been started, blocks of the log tree have WRITTEN flag set
3210 * in their headers. new modifications of the log will be written to
3211 * new positions. so it's safe to allow log writers to go in.
3213 mutex_unlock(&root->log_mutex);
3215 if (btrfs_is_zoned(fs_info)) {
3216 mutex_lock(&fs_info->tree_root->log_mutex);
3217 if (!log_root_tree->node) {
3218 ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
3220 mutex_unlock(&fs_info->tree_root->log_mutex);
3221 blk_finish_plug(&plug);
3225 mutex_unlock(&fs_info->tree_root->log_mutex);
3228 btrfs_init_log_ctx(&root_log_ctx, NULL);
3230 mutex_lock(&log_root_tree->log_mutex);
3232 index2 = log_root_tree->log_transid % 2;
3233 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3234 root_log_ctx.log_transid = log_root_tree->log_transid;
3237 * Now we are safe to update the log_root_tree because we're under the
3238 * log_mutex, and we're a current writer so we're holding the commit
3239 * open until we drop the log_mutex.
3241 ret = update_log_root(trans, log, &new_root_item);
3243 if (!list_empty(&root_log_ctx.list))
3244 list_del_init(&root_log_ctx.list);
3246 blk_finish_plug(&plug);
3247 btrfs_set_log_full_commit(trans);
3249 if (ret != -ENOSPC) {
3250 btrfs_abort_transaction(trans, ret);
3251 mutex_unlock(&log_root_tree->log_mutex);
3254 btrfs_wait_tree_log_extents(log, mark);
3255 mutex_unlock(&log_root_tree->log_mutex);
3260 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3261 blk_finish_plug(&plug);
3262 list_del_init(&root_log_ctx.list);
3263 mutex_unlock(&log_root_tree->log_mutex);
3264 ret = root_log_ctx.log_ret;
3268 index2 = root_log_ctx.log_transid % 2;
3269 if (atomic_read(&log_root_tree->log_commit[index2])) {
3270 blk_finish_plug(&plug);
3271 ret = btrfs_wait_tree_log_extents(log, mark);
3272 wait_log_commit(log_root_tree,
3273 root_log_ctx.log_transid);
3274 mutex_unlock(&log_root_tree->log_mutex);
3276 ret = root_log_ctx.log_ret;
3279 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3280 atomic_set(&log_root_tree->log_commit[index2], 1);
3282 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3283 wait_log_commit(log_root_tree,
3284 root_log_ctx.log_transid - 1);
3288 * now that we've moved on to the tree of log tree roots,
3289 * check the full commit flag again
3291 if (btrfs_need_log_full_commit(trans)) {
3292 blk_finish_plug(&plug);
3293 btrfs_wait_tree_log_extents(log, mark);
3294 mutex_unlock(&log_root_tree->log_mutex);
3296 goto out_wake_log_root;
3299 ret = btrfs_write_marked_extents(fs_info,
3300 &log_root_tree->dirty_log_pages,
3301 EXTENT_DIRTY | EXTENT_NEW);
3302 blk_finish_plug(&plug);
3304 * As described above, -EAGAIN indicates a hole in the extents. We
3305 * cannot wait for these write outs since waiting for them would cause a
3306 * deadlock. Bail out to the full commit instead.
3308 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
3309 btrfs_set_log_full_commit(trans);
3310 btrfs_wait_tree_log_extents(log, mark);
3311 mutex_unlock(&log_root_tree->log_mutex);
3312 goto out_wake_log_root;
3314 btrfs_set_log_full_commit(trans);
3315 btrfs_abort_transaction(trans, ret);
3316 mutex_unlock(&log_root_tree->log_mutex);
3317 goto out_wake_log_root;
3319 ret = btrfs_wait_tree_log_extents(log, mark);
3321 ret = btrfs_wait_tree_log_extents(log_root_tree,
3322 EXTENT_NEW | EXTENT_DIRTY);
3324 btrfs_set_log_full_commit(trans);
3325 mutex_unlock(&log_root_tree->log_mutex);
3326 goto out_wake_log_root;
3329 log_root_start = log_root_tree->node->start;
3330 log_root_level = btrfs_header_level(log_root_tree->node);
3331 log_root_tree->log_transid++;
3332 mutex_unlock(&log_root_tree->log_mutex);
3335 * Here we are guaranteed that nobody is going to write the superblock
3336 * for the current transaction before us, and that we will not write
3337 * our superblock before the previous transaction finishes its commit
3338 * and writes its superblock, because:
3340 * 1) We are holding a handle on the current transaction, so nobody
3341 * can commit it until we release the handle;
3343 * 2) Before writing our superblock we acquire the tree_log_mutex, so
3344 * if the previous transaction is still committing, and hasn't yet
3345 * written its superblock, we wait for it to do it, because a
3346 * transaction commit acquires the tree_log_mutex when the commit
3347 * begins and releases it only after writing its superblock.
3349 mutex_lock(&fs_info->tree_log_mutex);
3352 * The previous transaction writeout phase could have failed, and thus
3353 * marked the fs in an error state. We must not commit here, as we
3354 * could have updated our generation in the super_for_commit and
3355 * writing the super here would result in transid mismatches. If there
3356 * is an error here just bail.
3358 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3360 btrfs_set_log_full_commit(trans);
3361 btrfs_abort_transaction(trans, ret);
3362 mutex_unlock(&fs_info->tree_log_mutex);
3363 goto out_wake_log_root;
3366 btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
3367 btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
3368 ret = write_all_supers(fs_info, 1);
3369 mutex_unlock(&fs_info->tree_log_mutex);
3371 btrfs_set_log_full_commit(trans);
3372 btrfs_abort_transaction(trans, ret);
3373 goto out_wake_log_root;
3377 * We know there can only be one task here, since we have not yet set
3378 * root->log_commit[index1] to 0 and any task attempting to sync the
3379 * log must wait for the previous log transaction to commit if it's
3380 * still in progress or wait for the current log transaction commit if
3381 * someone else already started it. We use <= and not < because the
3382 * first log transaction has an ID of 0.
3384 ASSERT(root->last_log_commit <= log_transid);
3385 root->last_log_commit = log_transid;
3388 mutex_lock(&log_root_tree->log_mutex);
3389 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3391 log_root_tree->log_transid_committed++;
3392 atomic_set(&log_root_tree->log_commit[index2], 0);
3393 mutex_unlock(&log_root_tree->log_mutex);
3396 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3397 * all the updates above are seen by the woken threads. It might not be
3398 * necessary, but proving that seems to be hard.
3400 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3402 mutex_lock(&root->log_mutex);
3403 btrfs_remove_all_log_ctxs(root, index1, ret);
3404 root->log_transid_committed++;
3405 atomic_set(&root->log_commit[index1], 0);
3406 mutex_unlock(&root->log_mutex);
3409 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3410 * all the updates above are seen by the woken threads. It might not be
3411 * necessary, but proving that seems to be hard.
3413 cond_wake_up(&root->log_commit_wait[index1]);
3417 static void free_log_tree(struct btrfs_trans_handle *trans,
3418 struct btrfs_root *log)
3421 struct walk_control wc = {
3423 .process_func = process_one_buffer
3427 ret = walk_log_tree(trans, log, &wc);
3430 * We weren't able to traverse the entire log tree, the
3431 * typical scenario is getting an -EIO when reading an
3432 * extent buffer of the tree, due to a previous writeback failure.
3435 set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
3436 &log->fs_info->fs_state);
3439 * Some extent buffers of the log tree may still be dirty
3440 * and not yet written back to storage, because we may
3441 * have updates to a log tree without syncing a log tree,
3442 * such as during rename and link operations. So flush
3443 * them out and wait for their writeback to complete, so
3444 * that we properly cleanup their state and pages.
3446 btrfs_write_marked_extents(log->fs_info,
3447 &log->dirty_log_pages,
3448 EXTENT_DIRTY | EXTENT_NEW);
3449 btrfs_wait_tree_log_extents(log,
3450 EXTENT_DIRTY | EXTENT_NEW);
3453 btrfs_abort_transaction(trans, ret);
3455 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3459 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3460 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3461 extent_io_tree_release(&log->log_csum_range);
3463 btrfs_put_root(log);
3467 * free all the extents used by the tree log. This should be called
3468 * at commit time of the full transaction
3470 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3472 if (root->log_root) {
3473 free_log_tree(trans, root->log_root);
3474 root->log_root = NULL;
3475 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3480 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3481 struct btrfs_fs_info *fs_info)
3483 if (fs_info->log_root_tree) {
3484 free_log_tree(trans, fs_info->log_root_tree);
3485 fs_info->log_root_tree = NULL;
3486 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
3492 * Check if an inode was logged in the current transaction. This may often
3493 * return some false positives, because logged_trans is an in memory only field,
3494 * not persisted anywhere. This is meant to be used in contexts where a false
3495 * positive has no functional consequences.
3497 static bool inode_logged(struct btrfs_trans_handle *trans,
3498 struct btrfs_inode *inode)
3500 if (inode->logged_trans == trans->transid)
3504 * The inode's logged_trans is always 0 when we load it (because it is
3505 * not persisted in the inode item or elsewhere). So if it is 0 and the
3506 * inode was last modified in the current transaction, the inode may
3507 * have been logged before in the current transaction, then evicted and
3508 * loaded again in the current transaction - or it may never have been logged
3509 * in the current transaction at all; since we can not be sure, we have to
3510 * assume it was, otherwise our callers could leave an inconsistent log.
3512 if (inode->logged_trans == 0 &&
3513 inode->last_trans == trans->transid &&
3514 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3521 * If both a file and directory are logged, and unlinks or renames are
3522 * mixed in, we have a few interesting corners:
3524 * create file X in dir Y
3525 * link file X to X.link in dir Y
3527 * unlink file X but leave X.link
3530 * After a crash we would expect only X.link to exist. But file X
3531 * didn't get fsync'd again so the log has back refs for X and X.link.
3533 * We solve this by removing directory entries and inode backrefs from the
3534 * log when a file that was logged in the current transaction is
3535 * unlinked. Any later fsync will include the updated log entries, and
3536 * we'll be able to reconstruct the proper directory items from backrefs.
3538 * This optimization allows us to avoid relogging the entire inode
3539 * or the entire directory.
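* Illustrative pairing at unlink time (a sketch, not the exact call
* sites, which live in the unlink/rename paths): both the directory
* entries and the inode back reference are pruned from the log:
*
*	btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index);
*	btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir_ino);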
3541 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3542 struct btrfs_root *root,
3543 const char *name, int name_len,
3544 struct btrfs_inode *dir, u64 index)
3546 struct btrfs_root *log;
3547 struct btrfs_dir_item *di;
3548 struct btrfs_path *path;
3551 u64 dir_ino = btrfs_ino(dir);
3553 if (!inode_logged(trans, dir))
3556 ret = join_running_log_trans(root);
3560 mutex_lock(&dir->log_mutex);
3562 log = root->log_root;
3563 path = btrfs_alloc_path();
3569 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3570 name, name_len, -1);
3576 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3582 btrfs_release_path(path);
3583 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3584 index, name, name_len, -1);
3590 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3598 * We do not need to update the size field of the directory's inode item
3599 * because on log replay we update the field to reflect all existing
3600 * entries in the directory (see overwrite_item()).
3603 btrfs_free_path(path);
3605 mutex_unlock(&dir->log_mutex);
3606 if (err == -ENOSPC) {
3607 btrfs_set_log_full_commit(trans);
3609 } else if (err < 0) {
3610 btrfs_abort_transaction(trans, err);
3613 btrfs_end_log_trans(root);
3618 /* see comments for btrfs_del_dir_entries_in_log */
3619 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3620 struct btrfs_root *root,
3621 const char *name, int name_len,
3622 struct btrfs_inode *inode, u64 dirid)
3624 struct btrfs_root *log;
3628 if (!inode_logged(trans, inode))
3631 ret = join_running_log_trans(root);
3634 log = root->log_root;
3635 mutex_lock(&inode->log_mutex);
3637 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3639 mutex_unlock(&inode->log_mutex);
3640 if (ret == -ENOSPC) {
3641 btrfs_set_log_full_commit(trans);
3643 } else if (ret < 0 && ret != -ENOENT)
3644 btrfs_abort_transaction(trans, ret);
3645 btrfs_end_log_trans(root);
3651 * creates a range item in the log for 'dirid'. first_offset and
3652 * last_offset tell us which parts of the key space the log should
3653 * be considered authoritative for.
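* For example (illustrative variable names), a range item covering index
* offsets [first_offset, last_offset] of directory 'dirid' is keyed and
* filled like this:
*
*	key.objectid = dirid;
*	key.type     = BTRFS_DIR_LOG_INDEX_KEY;
*	key.offset   = first_offset;
*	btrfs_set_dir_log_end(leaf, item, last_offset);
*
* During replay, any directory entry in that range that is missing from
* the log is treated as deleted.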
3655 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3656 struct btrfs_root *log,
3657 struct btrfs_path *path,
3658 int key_type, u64 dirid,
3659 u64 first_offset, u64 last_offset)
3662 struct btrfs_key key;
3663 struct btrfs_dir_log_item *item;
3665 key.objectid = dirid;
3666 key.offset = first_offset;
3667 if (key_type == BTRFS_DIR_ITEM_KEY)
3668 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3670 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3671 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3675 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3676 struct btrfs_dir_log_item);
3677 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3678 btrfs_mark_buffer_dirty(path->nodes[0]);
3679 btrfs_release_path(path);
3684 * log all the items included in the current transaction for a given
3685 * directory. This also creates the range items in the log tree required
3686 * to replay anything deleted before the fsync
3688 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3689 struct btrfs_root *root, struct btrfs_inode *inode,
3690 struct btrfs_path *path,
3691 struct btrfs_path *dst_path, int key_type,
3692 struct btrfs_log_ctx *ctx,
3693 u64 min_offset, u64 *last_offset_ret)
3695 struct btrfs_key min_key;
3696 struct btrfs_root *log = root->log_root;
3697 struct extent_buffer *src;
3702 u64 first_offset = min_offset;
3703 u64 last_offset = (u64)-1;
3704 u64 ino = btrfs_ino(inode);
3706 log = root->log_root;
3708 min_key.objectid = ino;
3709 min_key.type = key_type;
3710 min_key.offset = min_offset;
3712 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3715 * we didn't find anything from this transaction, see if there
3716 * is anything at all
3718 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3719 min_key.objectid = ino;
3720 min_key.type = key_type;
3721 min_key.offset = (u64)-1;
3722 btrfs_release_path(path);
3723 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3725 btrfs_release_path(path);
3728 ret = btrfs_previous_item(root, path, ino, key_type);
3730 /* if ret == 0 there are items for this type,
3731 * create a range to tell us the last key of this type.
3732 * otherwise, there are no items in this directory after
3733 * *min_offset, and we create a range to indicate that.
3736 struct btrfs_key tmp;
3737 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3739 if (key_type == tmp.type)
3740 first_offset = max(min_offset, tmp.offset) + 1;
3745 /* go backward to find any previous key */
3746 ret = btrfs_previous_item(root, path, ino, key_type);
3748 struct btrfs_key tmp;
3749 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3750 if (key_type == tmp.type) {
3751 first_offset = tmp.offset;
3752 ret = overwrite_item(trans, log, dst_path,
3753 path->nodes[0], path->slots[0],
3761 btrfs_release_path(path);
3764 * Find the first key from this transaction again. See the note for
3765 * log_new_dir_dentries, if we're logging a directory recursively we
3766 * won't be holding its i_mutex, which means we can modify the directory
3767 * while we're logging it. If we remove an entry between our first
3768 * search and this search we'll not find the key again and can just bail.
3772 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3777 * we have a block from this transaction, log every item in it
3778 * from our directory
3781 struct btrfs_key tmp;
3782 src = path->nodes[0];
3783 nritems = btrfs_header_nritems(src);
3784 for (i = path->slots[0]; i < nritems; i++) {
3785 struct btrfs_dir_item *di;
3787 btrfs_item_key_to_cpu(src, &min_key, i);
3789 if (min_key.objectid != ino || min_key.type != key_type)
3792 if (need_resched()) {
3793 btrfs_release_path(path);
3798 ret = overwrite_item(trans, log, dst_path, src, i,
3806 * We must make sure that when we log a directory entry,
3807 * the corresponding inode, after log replay, has a
3808 * matching link count. For example:
3814 * xfs_io -c "fsync" mydir
3816 * <mount fs and log replay>
3818 * Would result in a fsync log that when replayed, our
3819 * file inode would have a link count of 1, but we get
3820 * two directory entries pointing to the same inode.
3821 * After removing one of the names, it would not be
3822 * possible to remove the other name, which always
3823 * resulted in stale file handle errors, and would not
3824 * be possible to rmdir the parent directory, since
3825 * its i_size could never decrement to the value
3826 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3828 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3829 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3831 (btrfs_dir_transid(src, di) == trans->transid ||
3832 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3833 tmp.type != BTRFS_ROOT_ITEM_KEY)
3834 ctx->log_new_dentries = true;
3836 path->slots[0] = nritems;
3839 * look ahead to the next item and see if it is also
3840 * from this directory and from this transaction
3842 ret = btrfs_next_leaf(root, path);
3845 last_offset = (u64)-1;
3850 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3851 if (tmp.objectid != ino || tmp.type != key_type) {
3852 last_offset = (u64)-1;
3855 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3856 ret = overwrite_item(trans, log, dst_path,
3857 path->nodes[0], path->slots[0],
3862 last_offset = tmp.offset;
3867 btrfs_release_path(path);
3868 btrfs_release_path(dst_path);
3871 *last_offset_ret = last_offset;
3873 * insert the log range keys to indicate where the log is valid
3876 ret = insert_dir_log_key(trans, log, path, key_type,
3877 ino, first_offset, last_offset);
3885 * logging directories is very similar to logging inodes. We find all the items
3886 * from the current transaction and write them to the log.
3888 * The recovery code scans the directory in the subvolume, and if it finds a
3889 * key in the range logged that is not present in the log tree, then it means
3890 * that dir entry was unlinked during the transaction.
3892 * In order for that scan to work, we must include one key smaller than
3893 * the smallest logged by this transaction and one key larger than the largest
3894 * key logged by this transaction.
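* Sketch of the iteration performed below: log_dir_items() reports, via
* its last parameter, the end of the key range it covered, and we resume
* just past it until the whole key space has been walked:
*
*	min_key = 0;
*	while (log_dir_items(..., min_key, &max_key) == 0 &&
*	       max_key != (u64)-1)
*		min_key = max_key + 1;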
3896 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3897 struct btrfs_root *root, struct btrfs_inode *inode,
3898 struct btrfs_path *path,
3899 struct btrfs_path *dst_path,
3900 struct btrfs_log_ctx *ctx)
3905 int key_type = BTRFS_DIR_ITEM_KEY;
3911 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3912 ctx, min_key, &max_key);
3915 if (max_key == (u64)-1)
3917 min_key = max_key + 1;
3920 if (key_type == BTRFS_DIR_ITEM_KEY) {
3921 key_type = BTRFS_DIR_INDEX_KEY;
3928 * a helper function to drop items from the log before we relog an
3929 * inode. max_key_type indicates the highest item type to remove.
3930 * This cannot be run for file data extents because it does not
3931 * free the extents they point to.
3933 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3934 struct btrfs_root *log,
3935 struct btrfs_path *path,
3936 u64 objectid, int max_key_type)
3939 struct btrfs_key key;
3940 struct btrfs_key found_key;
3943 key.objectid = objectid;
3944 key.type = max_key_type;
3945 key.offset = (u64)-1;
3948 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3949 BUG_ON(ret == 0); /* Logic error */
3953 if (path->slots[0] == 0)
3957 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3960 if (found_key.objectid != objectid)
3963 found_key.offset = 0;
3965 ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
3969 ret = btrfs_del_items(trans, log, path, start_slot,
3970 path->slots[0] - start_slot + 1);
3972 * If start slot isn't 0 then we don't need to re-search, we've
3973 * found the last guy with the objectid in this tree.
3975 if (ret || start_slot != 0)
3977 btrfs_release_path(path);
3979 btrfs_release_path(path);
3985 static void fill_inode_item(struct btrfs_trans_handle *trans,
3986 struct extent_buffer *leaf,
3987 struct btrfs_inode_item *item,
3988 struct inode *inode, int log_inode_only,
3991 struct btrfs_map_token token;
3994 btrfs_init_map_token(&token, leaf);
3996 if (log_inode_only) {
3997 /* set the generation to zero so the recovery code
3998 * can tell the difference between logging
3999 * just to say 'this inode exists' and logging
4000 * to say 'update this inode with these values'
4002 btrfs_set_token_inode_generation(&token, item, 0);
4003 btrfs_set_token_inode_size(&token, item, logged_isize);
4005 btrfs_set_token_inode_generation(&token, item,
4006 BTRFS_I(inode)->generation);
4007 btrfs_set_token_inode_size(&token, item, inode->i_size);
4010 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4011 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4012 btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4013 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4015 btrfs_set_token_timespec_sec(&token, &item->atime,
4016 inode->i_atime.tv_sec);
4017 btrfs_set_token_timespec_nsec(&token, &item->atime,
4018 inode->i_atime.tv_nsec);
4020 btrfs_set_token_timespec_sec(&token, &item->mtime,
4021 inode->i_mtime.tv_sec);
4022 btrfs_set_token_timespec_nsec(&token, &item->mtime,
4023 inode->i_mtime.tv_nsec);
4025 btrfs_set_token_timespec_sec(&token, &item->ctime,
4026 inode->i_ctime.tv_sec);
4027 btrfs_set_token_timespec_nsec(&token, &item->ctime,
4028 inode->i_ctime.tv_nsec);
4031 * We do not need to set the nbytes field, in fact during a fast fsync
4032 * its value may not even be correct, since a fast fsync does not wait
4033 * for ordered extent completion, which is where we update nbytes, it
4034 * only waits for writeback to complete. During log replay as we find
4035 * file extent items and replay them, we adjust the nbytes field of the
4036 * inode item in subvolume tree as needed (see overwrite_item()).
4039 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4040 btrfs_set_token_inode_transid(&token, item, trans->transid);
4041 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4042 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4043 BTRFS_I(inode)->ro_flags);
4044 btrfs_set_token_inode_flags(&token, item, flags);
4045 btrfs_set_token_inode_block_group(&token, item, 0);
4048 static int log_inode_item(struct btrfs_trans_handle *trans,
4049 struct btrfs_root *log, struct btrfs_path *path,
4050 struct btrfs_inode *inode, bool inode_item_dropped)
4052 struct btrfs_inode_item *inode_item;
4056 * If we are doing a fast fsync and the inode was logged before in the
4057 * current transaction, then we know the inode was previously logged and
4058 * it exists in the log tree. For performance reasons, in this case use
4059 * btrfs_search_slot() directly with ins_len set to 0 so that we never
4060 * attempt a write lock on the leaf's parent, which adds unnecessary lock
4061 * contention in case there are concurrent fsyncs for other inodes of the
4062 * same subvolume. Using btrfs_insert_empty_item() when the inode item
4063 * already exists can also result in unnecessarily splitting a leaf.
4065 if (!inode_item_dropped && inode->logged_trans == trans->transid) {
4066 ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
4072 * This means it is the first fsync in the current transaction,
4073 * so the inode item is not in the log and we need to insert it.
4074 * We can never get -EEXIST because we are only called for a fast
4075 * fsync and in case an inode eviction happens after the inode was
4076 * logged before in the current transaction, when we load the inode
4077 * again, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
4078 * flags and set ->logged_trans to 0.
4080 ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
4081 sizeof(*inode_item));
4082 ASSERT(ret != -EEXIST);
4086 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4087 struct btrfs_inode_item);
4088 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
4090 btrfs_release_path(path);
4094 static int log_csums(struct btrfs_trans_handle *trans,
4095 struct btrfs_inode *inode,
4096 struct btrfs_root *log_root,
4097 struct btrfs_ordered_sum *sums)
4099 const u64 lock_end = sums->bytenr + sums->len - 1;
4100 struct extent_state *cached_state = NULL;
4104 * If this inode was not used for reflink operations in the current
4105 * transaction with new extents, then do the fast path, no need to
4106 * worry about logging checksum items with overlapping ranges.
4108 if (inode->last_reflink_trans < trans->transid)
4109 return btrfs_csum_file_blocks(trans, log_root, sums);
4112 * Serialize logging for checksums. This is to avoid racing with the
4113 * same checksum being logged by another task that is logging another
4114 * file which happens to refer to the same extent as well. Such races
4115 * can leave checksum items in the log with overlapping ranges.
4117 ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
4118 lock_end, &cached_state);
4122 * Due to extent cloning, we might have logged a csum item that covers a
4123 * subrange of a cloned extent, and later we can end up logging a csum
4124 * item for a larger subrange of the same extent or the entire range.
4125 * This would leave csum items in the log tree that cover the same range
4126 * and break the searches for checksums in the log tree, resulting in
4127 * some checksums missing in the fs/subvolume tree. So just delete (or
4128 * trim and adjust) any existing csum items in the log for this range.
4130 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
4132 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4134 unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
4140 static noinline int copy_items(struct btrfs_trans_handle *trans,
4141 struct btrfs_inode *inode,
4142 struct btrfs_path *dst_path,
4143 struct btrfs_path *src_path,
4144 int start_slot, int nr, int inode_only,
4147 struct btrfs_fs_info *fs_info = trans->fs_info;
4148 unsigned long src_offset;
4149 unsigned long dst_offset;
4150 struct btrfs_root *log = inode->root->log_root;
4151 struct btrfs_file_extent_item *extent;
4152 struct btrfs_inode_item *inode_item;
4153 struct extent_buffer *src = src_path->nodes[0];
4155 struct btrfs_key *ins_keys;
4159 struct list_head ordered_sums;
4160 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
4162 INIT_LIST_HEAD(&ordered_sums);
4164 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4165 nr * sizeof(u32), GFP_NOFS);
4169 ins_sizes = (u32 *)ins_data;
4170 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4172 for (i = 0; i < nr; i++) {
4173 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
4174 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
4176 ret = btrfs_insert_empty_items(trans, log, dst_path,
4177 ins_keys, ins_sizes, nr);
4183 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
4184 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
4185 dst_path->slots[0]);
4187 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
4189 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
4190 inode_item = btrfs_item_ptr(dst_path->nodes[0],
4192 struct btrfs_inode_item);
4193 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4195 inode_only == LOG_INODE_EXISTS,
4198 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4199 src_offset, ins_sizes[i]);
4202 /* take a reference on file data extents so that truncates
4203 * or deletes of this inode don't have to relog the inode again
4206 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4209 extent = btrfs_item_ptr(src, start_slot + i,
4210 struct btrfs_file_extent_item);
4212 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4215 found_type = btrfs_file_extent_type(src, extent);
4216 if (found_type == BTRFS_FILE_EXTENT_REG) {
4218 ds = btrfs_file_extent_disk_bytenr(src,
4220 /* ds == 0 is a hole */
4224 dl = btrfs_file_extent_disk_num_bytes(src,
4226 cs = btrfs_file_extent_offset(src, extent);
4227 cl = btrfs_file_extent_num_bytes(src,
4229 if (btrfs_file_extent_compression(src,
4235 ret = btrfs_lookup_csums_range(
4237 ds + cs, ds + cs + cl - 1,
4245 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4246 btrfs_release_path(dst_path);
4250 * we have to do this after the loop above to avoid changing the
4251 * log tree while trying to change the log tree.
4253 while (!list_empty(&ordered_sums)) {
4254 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4255 struct btrfs_ordered_sum,
4258 ret = log_csums(trans, inode, log, sums);
4259 list_del(&sums->list);
4266 static int extent_cmp(void *priv, const struct list_head *a,
4267 const struct list_head *b)
4269 const struct extent_map *em1, *em2;
4271 em1 = list_entry(a, struct extent_map, list);
4272 em2 = list_entry(b, struct extent_map, list);
4274 if (em1->start < em2->start)
4276 else if (em1->start > em2->start)
4281 static int log_extent_csums(struct btrfs_trans_handle *trans,
4282 struct btrfs_inode *inode,
4283 struct btrfs_root *log_root,
4284 const struct extent_map *em,
4285 struct btrfs_log_ctx *ctx)
4287 struct btrfs_ordered_extent *ordered;
4290 u64 mod_start = em->mod_start;
4291 u64 mod_len = em->mod_len;
4292 LIST_HEAD(ordered_sums);
4295 if (inode->flags & BTRFS_INODE_NODATASUM ||
4296 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4297 em->block_start == EXTENT_MAP_HOLE)
4300 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
4301 const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
4302 const u64 mod_end = mod_start + mod_len;
4303 struct btrfs_ordered_sum *sums;
4308 if (ordered_end <= mod_start)
4310 if (mod_end <= ordered->file_offset)
4314 * We are going to copy all the csums on this ordered extent, so
4315 * go ahead and adjust mod_start and mod_len in case this ordered
4316 * extent has already been logged.
4318 if (ordered->file_offset > mod_start) {
4319 if (ordered_end >= mod_end)
4320 mod_len = ordered->file_offset - mod_start;
4322 * If we have this case
4324 * |--------- logged extent ---------|
4325 * |----- ordered extent ----|
4327 * Just don't mess with mod_start and mod_len, we'll
4328 * just end up logging more csums than we need and it will be ok.
4332 if (ordered_end < mod_end) {
4333 mod_len = mod_end - ordered_end;
4334 mod_start = ordered_end;
4341 * To keep us from looping for the above case of an ordered
4342 * extent that falls inside of the logged extent.
4344 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4347 list_for_each_entry(sums, &ordered->list, list) {
4348 ret = log_csums(trans, inode, log_root, sums);
4354 /* We're done, found all csums in the ordered extents. */
4358 /* If we're compressed we have to save the entire range of csums. */
4359 if (em->compress_type) {
4361 csum_len = max(em->block_len, em->orig_block_len);
4363 csum_offset = mod_start - em->start;
4367 /* block start is already adjusted for the file extent offset. */
4368 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4369 em->block_start + csum_offset,
4370 em->block_start + csum_offset +
4371 csum_len - 1, &ordered_sums, 0);
4375 while (!list_empty(&ordered_sums)) {
4376 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4377 struct btrfs_ordered_sum,
4380 ret = log_csums(trans, inode, log_root, sums);
4381 list_del(&sums->list);
4388 static int log_one_extent(struct btrfs_trans_handle *trans,
4389 struct btrfs_inode *inode, struct btrfs_root *root,
4390 const struct extent_map *em,
4391 struct btrfs_path *path,
4392 struct btrfs_log_ctx *ctx)
4394 struct btrfs_drop_extents_args drop_args = { 0 };
4395 struct btrfs_root *log = root->log_root;
4396 struct btrfs_file_extent_item *fi;
4397 struct extent_buffer *leaf;
4398 struct btrfs_map_token token;
4399 struct btrfs_key key;
4400 u64 extent_offset = em->start - em->orig_start;
4404 ret = log_extent_csums(trans, inode, log, em, ctx);
4408 drop_args.path = path;
4409 drop_args.start = em->start;
4410 drop_args.end = em->start + em->len;
4411 drop_args.replace_extent = true;
4412 drop_args.extent_item_size = sizeof(*fi);
4413 ret = btrfs_drop_extents(trans, log, inode, &drop_args);
4417 if (!drop_args.extent_inserted) {
4418 key.objectid = btrfs_ino(inode);
4419 key.type = BTRFS_EXTENT_DATA_KEY;
4420 key.offset = em->start;
4422 ret = btrfs_insert_empty_item(trans, log, path, &key,
4427 leaf = path->nodes[0];
4428 btrfs_init_map_token(&token, leaf);
4429 fi = btrfs_item_ptr(leaf, path->slots[0],
4430 struct btrfs_file_extent_item);
4432 btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
4433 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4434 btrfs_set_token_file_extent_type(&token, fi,
4435 BTRFS_FILE_EXTENT_PREALLOC);
4437 btrfs_set_token_file_extent_type(&token, fi,
4438 BTRFS_FILE_EXTENT_REG);
4440 block_len = max(em->block_len, em->orig_block_len);
4441 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4442 btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4444 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4445 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4446 btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4449 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4451 btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
4452 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
4455 btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
4456 btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
4457 btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
4458 btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
4459 btrfs_set_token_file_extent_encryption(&token, fi, 0);
4460 btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
4461 btrfs_mark_buffer_dirty(leaf);
4463 btrfs_release_path(path);
4469 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4470 * lose them after doing a full/fast fsync and replaying the log. We scan the
4471 * subvolume's root instead of iterating the inode's extent map tree because
4472 * otherwise we can log incorrect extent items based on extent map conversion.
4473 * That can happen due to the fact that extent maps are merged when they
4474 * are not in the extent map tree's list of modified extents.
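* A small illustration (hypothetical layout) of the i_size boundary case
* handled below: if a prealloc extent crosses i_size, old log items must
* be truncated down to the end of that extent rather than to i_size,
* otherwise replay would clip it and leave an implicit hole before any
* prealloc extent that starts beyond i_size:
*
*	0 ......... i_size ........... extent_end
*	|--- data ---|===== prealloc =====|
*	                                  |== prealloc beyond i_size ==|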
4476 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4477 struct btrfs_inode *inode,
4478 struct btrfs_path *path)
4480 struct btrfs_root *root = inode->root;
4481 struct btrfs_key key;
4482 const u64 i_size = i_size_read(&inode->vfs_inode);
4483 const u64 ino = btrfs_ino(inode);
4484 struct btrfs_path *dst_path = NULL;
4485 bool dropped_extents = false;
4486 u64 truncate_offset = i_size;
4487 struct extent_buffer *leaf;
4493 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4497 key.type = BTRFS_EXTENT_DATA_KEY;
4498 key.offset = i_size;
4499 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4504 * We must check if there is a prealloc extent that starts before the
4505 * i_size and crosses the i_size boundary. This is to ensure later we
4506 * truncate down to the end of that extent and not to the i_size, as
4507 * otherwise we end up losing part of the prealloc extent after a log
4508 * replay and with an implicit hole if there is another prealloc extent
4509 * that starts at an offset beyond i_size.
4511 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4516 struct btrfs_file_extent_item *ei;
4518 leaf = path->nodes[0];
4519 slot = path->slots[0];
4520 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4522 if (btrfs_file_extent_type(leaf, ei) ==
4523 BTRFS_FILE_EXTENT_PREALLOC) {
4526 btrfs_item_key_to_cpu(leaf, &key, slot);
4527 extent_end = key.offset +
4528 btrfs_file_extent_num_bytes(leaf, ei);
4530 if (extent_end > i_size)
4531 truncate_offset = extent_end;
4538 leaf = path->nodes[0];
4539 slot = path->slots[0];
4541 if (slot >= btrfs_header_nritems(leaf)) {
4543 ret = copy_items(trans, inode, dst_path, path,
4544 start_slot, ins_nr, 1, 0);
4549 ret = btrfs_next_leaf(root, path);
4559 btrfs_item_key_to_cpu(leaf, &key, slot);
4560 if (key.objectid > ino)
4562 if (WARN_ON_ONCE(key.objectid < ino) ||
4563 key.type < BTRFS_EXTENT_DATA_KEY ||
4564 key.offset < i_size) {
4568 if (!dropped_extents) {
4570 * Avoid logging extent items already logged in past fsync calls,
4571 * which would lead to duplicate keys in the log tree.
4574 ret = btrfs_truncate_inode_items(trans,
4576 inode, truncate_offset,
4577 BTRFS_EXTENT_DATA_KEY,
4579 } while (ret == -EAGAIN);
4582 dropped_extents = true;
4589 dst_path = btrfs_alloc_path();
4597 ret = copy_items(trans, inode, dst_path, path,
4598 start_slot, ins_nr, 1, 0);
4600 btrfs_release_path(path);
4601 btrfs_free_path(dst_path);
4605 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4606 struct btrfs_root *root,
4607 struct btrfs_inode *inode,
4608 struct btrfs_path *path,
4609 struct btrfs_log_ctx *ctx)
4611 struct btrfs_ordered_extent *ordered;
4612 struct btrfs_ordered_extent *tmp;
4613 struct extent_map *em, *n;
4614 struct list_head extents;
4615 struct extent_map_tree *tree = &inode->extent_tree;
4619 INIT_LIST_HEAD(&extents);
4621 write_lock(&tree->lock);
4623 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4624 list_del_init(&em->list);
4626 * Just an arbitrary number; this can be really CPU intensive
4627 * once we start getting a lot of extents, and really once we
4628 * have a bunch of extents we just want to commit since it will be faster.
4631 if (++num > 32768) {
4632 list_del_init(&tree->modified_extents);
4637 if (em->generation < trans->transid)
4640 /* We log prealloc extents beyond eof later. */
4641 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4642 em->start >= i_size_read(&inode->vfs_inode))
4645 /* Need a ref to keep it from getting evicted from cache */
4646 refcount_inc(&em->refs);
4647 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4648 list_add_tail(&em->list, &extents);
4652 list_sort(NULL, &extents, extent_cmp);
4654 while (!list_empty(&extents)) {
4655 em = list_entry(extents.next, struct extent_map, list);
4657 list_del_init(&em->list);
4660 * If we had an error we just need to delete everybody from our private list.
4664 clear_em_logging(tree, em);
4665 free_extent_map(em);
4669 write_unlock(&tree->lock);
4671 ret = log_one_extent(trans, inode, root, em, path, ctx);
4672 write_lock(&tree->lock);
4673 clear_em_logging(tree, em);
4674 free_extent_map(em);
4676 WARN_ON(!list_empty(&extents));
4677 write_unlock(&tree->lock);
4679 btrfs_release_path(path);
4681 ret = btrfs_log_prealloc_extents(trans, inode, path);
4686 * We have logged all extents successfully, now make sure the commit of
4687 * the current transaction waits for the ordered extents to complete
4688 * before it commits and wipes out the log trees, otherwise we would
4689 * lose data if an ordered extent completes after the transaction
4690 * commits and a power failure happens after the transaction commit.
4692 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4693 list_del_init(&ordered->log_list);
4694 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4696 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4697 spin_lock_irq(&inode->ordered_tree.lock);
4698 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4699 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4700 atomic_inc(&trans->transaction->pending_ordered);
4702 spin_unlock_irq(&inode->ordered_tree.lock);
4704 btrfs_put_ordered_extent(ordered);
4710 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4711 struct btrfs_path *path, u64 *size_ret)
4713 struct btrfs_key key;
4716 key.objectid = btrfs_ino(inode);
4717 key.type = BTRFS_INODE_ITEM_KEY;
4720 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4723 } else if (ret > 0) {
4726 struct btrfs_inode_item *item;
4728 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4729 struct btrfs_inode_item);
4730 *size_ret = btrfs_inode_size(path->nodes[0], item);
4732 * If the in-memory inode's i_size is smaller than the inode
4733 * size stored in the btree, return the inode's i_size, so
4734 * that we get a correct inode size after replaying the log
4735 * when before a power failure we had a shrinking truncate
4736 * followed by addition of a new name (rename / new hard link).
4737 * Otherwise return the inode size from the btree, to avoid
4738 * data loss when replaying a log due to previously doing a
4739 * write that expands the inode's size and logging a new name
4740 * immediately after.
4742 if (*size_ret > inode->vfs_inode.i_size)
4743 *size_ret = inode->vfs_inode.i_size;
4746 btrfs_release_path(path);
4751 * At the moment we always log all xattrs. This is to figure out at log replay
4752 * time which xattrs must have their deletion replayed. If an xattr is missing
4753 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4754 * because if an xattr is deleted, the inode is fsynced and a power failure
4755 * happens, causing the log to be replayed the next time the fs is mounted,
4756 * we want the xattr to not exist anymore (same behaviour as other filesystems
4757 * with a journal, ext3/4, xfs, f2fs, etc).
4759 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4760 struct btrfs_root *root,
4761 struct btrfs_inode *inode,
4762 struct btrfs_path *path,
4763 struct btrfs_path *dst_path)
4766 struct btrfs_key key;
4767 const u64 ino = btrfs_ino(inode);
4770 bool found_xattrs = false;
4772 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
4776 key.type = BTRFS_XATTR_ITEM_KEY;
4779 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4784 int slot = path->slots[0];
4785 struct extent_buffer *leaf = path->nodes[0];
4786 int nritems = btrfs_header_nritems(leaf);
4788 if (slot >= nritems) {
4790 ret = copy_items(trans, inode, dst_path, path,
4791 start_slot, ins_nr, 1, 0);
4796 ret = btrfs_next_leaf(root, path);
4804 btrfs_item_key_to_cpu(leaf, &key, slot);
4805 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4812 found_xattrs = true;
4816 ret = copy_items(trans, inode, dst_path, path,
4817 start_slot, ins_nr, 1, 0);
4823 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
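/*
 * Illustration only (not part of the original file): the (start_slot, ins_nr)
 * bookkeeping used in btrfs_log_all_xattrs() above - and again in
 * copy_inode_items_to_log() below - batches runs of contiguous leaf slots
 * into a single copy_items() call. A minimal sketch of the idiom, with a
 * hypothetical flush() callback standing in for copy_items():
 */
static int batch_slots_sketch(int nritems, bool (*want)(int slot),
			      int (*flush)(int start_slot, int ins_nr))
{
	int start_slot = 0;
	int ins_nr = 0;
	int slot;

	for (slot = 0; slot < nritems; slot++) {
		if (want(slot)) {
			/* Extend the current run, starting one if needed. */
			if (ins_nr == 0)
				start_slot = slot;
			ins_nr++;
			continue;
		}
		if (ins_nr > 0) {
			int ret = flush(start_slot, ins_nr);

			if (ret < 0)
				return ret;
			ins_nr = 0;
		}
	}
	/* Flush the final run, if any. */
	if (ins_nr > 0)
		return flush(start_slot, ins_nr);
	return 0;
}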
4829 * When using the NO_HOLES feature if we punched a hole that causes the
4830 * deletion of entire leafs or all the extent items of the first leaf (the one
4831 * that contains the inode item and references) we may end up not processing
4832 * any extents, because there are no leafs with a generation matching the
4833 * current transaction that have extent items for our inode. So we need to find
4834 * if any holes exist and then log them. We also need to log holes after any
4835 * truncate operation that changes the inode's size.
4837 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4838 struct btrfs_root *root,
4839 struct btrfs_inode *inode,
4840 struct btrfs_path *path)
4842 struct btrfs_fs_info *fs_info = root->fs_info;
4843 struct btrfs_key key;
4844 const u64 ino = btrfs_ino(inode);
4845 const u64 i_size = i_size_read(&inode->vfs_inode);
4846 u64 prev_extent_end = 0;
4849 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4853 key.type = BTRFS_EXTENT_DATA_KEY;
4856 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4861 struct extent_buffer *leaf = path->nodes[0];
4863 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4864 ret = btrfs_next_leaf(root, path);
4871 leaf = path->nodes[0];
4874 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4875 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4878 /* We have a hole, log it. */
4879 if (prev_extent_end < key.offset) {
4880 const u64 hole_len = key.offset - prev_extent_end;
4883 * Release the path to avoid deadlocks with other code
4884 * paths that search the root while holding locks on
4885 * leafs from the log root.
4887 btrfs_release_path(path);
4888 ret = btrfs_insert_file_extent(trans, root->log_root,
4889 ino, prev_extent_end, 0,
4890 0, hole_len, 0, hole_len,
4896 * Search for the same key again in the root. Since it's
4897 * an extent item and we are holding the inode lock, the
4898			 * key must still exist. If it doesn't, just emit a warning
4899			 * and return an error to fall back to a transaction commit.
4902 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4905 if (WARN_ON(ret > 0))
4907 leaf = path->nodes[0];
4910 prev_extent_end = btrfs_file_extent_end(path);
4915 if (prev_extent_end < i_size) {
4918 btrfs_release_path(path);
4919 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4920 ret = btrfs_insert_file_extent(trans, root->log_root,
4921 ino, prev_extent_end, 0, 0,
4922 hole_len, 0, hole_len,
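/*
 * Illustration only (not part of the original file): the core of the hole
 * walk in btrfs_log_holes() above, reduced to a plain sorted array. Gaps
 * between extents, and the tail gap up to i_size, are reported through a
 * hypothetical log_hole() callback (the real code calls
 * btrfs_insert_file_extent() with a zero disk bytenr instead).
 */
struct hole_sketch_extent {
	u64 offset;
	u64 len;
};

static int log_holes_sketch(const struct hole_sketch_extent *ext, int nr,
			    u64 i_size, int (*log_hole)(u64 start, u64 len))
{
	u64 prev_end = 0;
	int i;

	for (i = 0; i < nr; i++) {
		/* A gap before this extent is a hole. */
		if (prev_end < ext[i].offset) {
			int ret = log_hole(prev_end, ext[i].offset - prev_end);

			if (ret < 0)
				return ret;
		}
		prev_end = ext[i].offset + ext[i].len;
	}
	/* Implicit hole between the last extent and i_size. */
	if (prev_end < i_size)
		return log_hole(prev_end, i_size - prev_end);
	return 0;
}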
4932 * When we are logging a new inode X, check if it doesn't have a reference that
4933 * matches the reference from some other inode Y created in a past transaction
4934 * and that was renamed in the current transaction. If we don't do this, then at
4935 * log replay time we can lose inode Y (and all its files if it's a directory):
4938 * echo "hello world" > /mnt/x/foobar
4941 * mkdir /mnt/x # or touch /mnt/x
4942 * xfs_io -c fsync /mnt/x
4944 * mount fs, trigger log replay
4946 * After the log replay procedure, we would lose the first directory and all its
4947 * files (file foobar).
4948 * For the case where inode Y is not a directory we simply end up losing it:
4950 * echo "123" > /mnt/foo
4952 * mv /mnt/foo /mnt/bar
4953 * echo "abc" > /mnt/foo
4954 * xfs_io -c fsync /mnt/foo
4957 * We also need this for cases where a snapshot entry is replaced by some other
4958 * entry (file or directory) otherwise we end up with an unreplayable log due to
4959 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4960 * if it were a regular entry:
4963 * btrfs subvolume snapshot /mnt /mnt/x/snap
4964 * btrfs subvolume delete /mnt/x/snap
4967 * fsync /mnt/x or fsync some new file inside it
4970 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4971 * the same transaction.
4973 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4975 const struct btrfs_key *key,
4976 struct btrfs_inode *inode,
4977 u64 *other_ino, u64 *other_parent)
4980 struct btrfs_path *search_path;
4983 u32 item_size = btrfs_item_size_nr(eb, slot);
4985 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4987 search_path = btrfs_alloc_path();
4990 search_path->search_commit_root = 1;
4991 search_path->skip_locking = 1;
4993 while (cur_offset < item_size) {
4997 unsigned long name_ptr;
4998 struct btrfs_dir_item *di;
5000 if (key->type == BTRFS_INODE_REF_KEY) {
5001 struct btrfs_inode_ref *iref;
5003 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
5004 parent = key->offset;
5005 this_name_len = btrfs_inode_ref_name_len(eb, iref);
5006 name_ptr = (unsigned long)(iref + 1);
5007 this_len = sizeof(*iref) + this_name_len;
5009 struct btrfs_inode_extref *extref;
5011 extref = (struct btrfs_inode_extref *)(ptr +
5013 parent = btrfs_inode_extref_parent(eb, extref);
5014 this_name_len = btrfs_inode_extref_name_len(eb, extref);
5015 name_ptr = (unsigned long)&extref->name;
5016 this_len = sizeof(*extref) + this_name_len;
5019 if (this_name_len > name_len) {
5022 new_name = krealloc(name, this_name_len, GFP_NOFS);
5027 name_len = this_name_len;
5031 read_extent_buffer(eb, name, name_ptr, this_name_len);
5032 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
5033 parent, name, this_name_len, 0);
5034 if (di && !IS_ERR(di)) {
5035 struct btrfs_key di_key;
5037 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
5039 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
5040 if (di_key.objectid != key->objectid) {
5042 *other_ino = di_key.objectid;
5043 *other_parent = parent;
5051 } else if (IS_ERR(di)) {
5055 btrfs_release_path(search_path);
5057 cur_offset += this_len;
5061 btrfs_free_path(search_path);
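/*
 * Illustration only (not part of the original file): the byte layout that
 * btrfs_check_ref_name_override() walks above. An INODE_REF item packs a
 * variable number of (struct btrfs_inode_ref + name bytes) records back to
 * back, so the cursor advances by header size plus name length:
 *
 *   [btrfs_inode_ref][name][btrfs_inode_ref][name]...
 *
 * Sketch for the INODE_REF flavour only (EXTREF is analogous):
 */
static void walk_inode_refs_sketch(struct extent_buffer *eb, int slot)
{
	u32 item_size = btrfs_item_size_nr(eb, slot);
	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
	u32 cur_offset = 0;

	while (cur_offset < item_size) {
		struct btrfs_inode_ref *iref;
		u16 name_len;

		iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
		name_len = btrfs_inode_ref_name_len(eb, iref);
		/* read_extent_buffer() would copy the name out here. */
		cur_offset += sizeof(*iref) + name_len;
	}
}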
5066 struct btrfs_ino_list {
5069 struct list_head list;
5072 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5073 struct btrfs_root *root,
5074 struct btrfs_path *path,
5075 struct btrfs_log_ctx *ctx,
5076 u64 ino, u64 parent)
5078 struct btrfs_ino_list *ino_elem;
5079 LIST_HEAD(inode_list);
5082 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5085 ino_elem->ino = ino;
5086 ino_elem->parent = parent;
5087 list_add_tail(&ino_elem->list, &inode_list);
5089 while (!list_empty(&inode_list)) {
5090 struct btrfs_fs_info *fs_info = root->fs_info;
5091 struct btrfs_key key;
5092 struct inode *inode;
5094 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
5096 ino = ino_elem->ino;
5097 parent = ino_elem->parent;
5098 list_del(&ino_elem->list);
5103 btrfs_release_path(path);
5105 inode = btrfs_iget(fs_info->sb, ino, root);
5107 * If the other inode that had a conflicting dir entry was
5108		 * deleted in the current transaction, we need to log its parent directory.
5111 if (IS_ERR(inode)) {
5112 ret = PTR_ERR(inode);
5113 if (ret == -ENOENT) {
5114 inode = btrfs_iget(fs_info->sb, parent, root);
5115 if (IS_ERR(inode)) {
5116 ret = PTR_ERR(inode);
5118 ret = btrfs_log_inode(trans, root,
5120 LOG_OTHER_INODE_ALL,
5122 btrfs_add_delayed_iput(inode);
5128 * If the inode was already logged skip it - otherwise we can
5129 * hit an infinite loop. Example:
5131 * From the commit root (previous transaction) we have the
5134 * inode 257 a directory
5135 * inode 258 with references "zz" and "zz_link" on inode 257
5136 * inode 259 with reference "a" on inode 257
5138 * And in the current (uncommitted) transaction we have:
5140 * inode 257 a directory, unchanged
5141 * inode 258 with references "a" and "a2" on inode 257
5142 * inode 259 with reference "zz_link" on inode 257
5143 * inode 261 with reference "zz" on inode 257
5145 * When logging inode 261 the following infinite loop could
5146 * happen if we don't skip already logged inodes:
5148 * - we detect inode 258 as a conflicting inode, with inode 261
5149 * on reference "zz", and log it;
5151 * - we detect inode 259 as a conflicting inode, with inode 258
5152 * on reference "a", and log it;
5154 * - we detect inode 258 as a conflicting inode, with inode 259
5155 * on reference "zz_link", and log it - again! After this we
5156 * repeat the above steps forever.
5158 spin_lock(&BTRFS_I(inode)->lock);
5160 * Check the inode's logged_trans only instead of
5161 * btrfs_inode_in_log(). This is because the last_log_commit of
5162 * the inode is not updated when we only log that it exists (see
5163 * btrfs_log_inode()).
5165 if (BTRFS_I(inode)->logged_trans == trans->transid) {
5166 spin_unlock(&BTRFS_I(inode)->lock);
5167 btrfs_add_delayed_iput(inode);
5170 spin_unlock(&BTRFS_I(inode)->lock);
5172 * We are safe logging the other inode without acquiring its
5173 * lock as long as we log with the LOG_INODE_EXISTS mode. We
5174 * are safe against concurrent renames of the other inode as
5175 * well because during a rename we pin the log and update the
5176 * log with the new name before we unpin it.
5178 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5179 LOG_OTHER_INODE, ctx);
5181 btrfs_add_delayed_iput(inode);
5186 key.type = BTRFS_INODE_REF_KEY;
5188 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5190 btrfs_add_delayed_iput(inode);
5195 struct extent_buffer *leaf = path->nodes[0];
5196 int slot = path->slots[0];
5198 u64 other_parent = 0;
5200 if (slot >= btrfs_header_nritems(leaf)) {
5201 ret = btrfs_next_leaf(root, path);
5204 } else if (ret > 0) {
5211 btrfs_item_key_to_cpu(leaf, &key, slot);
5212 if (key.objectid != ino ||
5213 (key.type != BTRFS_INODE_REF_KEY &&
5214 key.type != BTRFS_INODE_EXTREF_KEY)) {
5219 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5220 BTRFS_I(inode), &other_ino,
5225 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5230 ino_elem->ino = other_ino;
5231 ino_elem->parent = other_parent;
5232 list_add_tail(&ino_elem->list, &inode_list);
5237 btrfs_add_delayed_iput(inode);
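/*
 * Illustration only (not part of the original file): log_conflicting_inodes()
 * above (and log_new_dir_dentries() later in this file) avoid recursion -
 * and thus kernel stack overflows - by draining a worklist; entries
 * discovered while processing one item are appended and picked up by a later
 * iteration. A sketch with a hypothetical process() callback that may add
 * more entries:
 */
static int drain_ino_worklist_sketch(struct list_head *worklist,
				     int (*process)(struct btrfs_ino_list *elem,
						    struct list_head *worklist))
{
	int ret = 0;

	while (!list_empty(worklist)) {
		struct btrfs_ino_list *elem;

		elem = list_first_entry(worklist, struct btrfs_ino_list, list);
		list_del(&elem->list);
		/* Keep draining after an error so every entry is freed. */
		if (!ret)
			ret = process(elem, worklist);
		kfree(elem);
	}
	return ret;
}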
5243 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5244 struct btrfs_inode *inode,
5245 struct btrfs_key *min_key,
5246 const struct btrfs_key *max_key,
5247 struct btrfs_path *path,
5248 struct btrfs_path *dst_path,
5249 const u64 logged_isize,
5250 const bool recursive_logging,
5251 const int inode_only,
5252 struct btrfs_log_ctx *ctx,
5253 bool *need_log_inode_item)
5255 const u64 i_size = i_size_read(&inode->vfs_inode);
5256 struct btrfs_root *root = inode->root;
5257 int ins_start_slot = 0;
5262 ret = btrfs_search_forward(root, min_key, path, trans->transid);
5270 /* Note, ins_nr might be > 0 here, cleanup outside the loop */
5271 if (min_key->objectid != max_key->objectid)
5273 if (min_key->type > max_key->type)
5276 if (min_key->type == BTRFS_INODE_ITEM_KEY) {
5277 *need_log_inode_item = false;
5278 } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
5279 min_key->offset >= i_size) {
5281 * Extents at and beyond eof are logged with
5282 * btrfs_log_prealloc_extents().
5283 * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
5284 * and no keys greater than that, so bail out.
5287 } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
5288 min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5289 inode->generation == trans->transid &&
5290 !recursive_logging) {
5292 u64 other_parent = 0;
5294 ret = btrfs_check_ref_name_override(path->nodes[0],
5295 path->slots[0], min_key, inode,
5296 &other_ino, &other_parent);
5299 } else if (ret > 0 && ctx &&
5300 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5305 ins_start_slot = path->slots[0];
5307 ret = copy_items(trans, inode, dst_path, path,
5308 ins_start_slot, ins_nr,
5309 inode_only, logged_isize);
5314 ret = log_conflicting_inodes(trans, root, path,
5315 ctx, other_ino, other_parent);
5318 btrfs_release_path(path);
5321 } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5322 /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
5325 ret = copy_items(trans, inode, dst_path, path,
5327 ins_nr, inode_only, logged_isize);
5334 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5337 } else if (!ins_nr) {
5338 ins_start_slot = path->slots[0];
5343 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5344 ins_nr, inode_only, logged_isize);
5348 ins_start_slot = path->slots[0];
5351 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5352 btrfs_item_key_to_cpu(path->nodes[0], min_key,
5357 ret = copy_items(trans, inode, dst_path, path,
5358 ins_start_slot, ins_nr, inode_only,
5364 btrfs_release_path(path);
5366 if (min_key->offset < (u64)-1) {
5368 } else if (min_key->type < max_key->type) {
5370 min_key->offset = 0;
5376 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5377 ins_nr, inode_only, logged_isize);
5382 if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
5384 * Release the path because otherwise we might attempt to double
5385 * lock the same leaf with btrfs_log_prealloc_extents() below.
5387 btrfs_release_path(path);
5388 ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
5394 /* log a single inode in the tree log.
5395 * At least one parent directory for this inode must exist in the tree
5396 * or be logged already.
5398 * Any items from this inode changed by the current transaction are copied
5399 * to the log tree. An extra reference is taken on any extents in this
5400 * file, allowing us to avoid a whole pile of corner cases around logging
5401 * blocks that have been removed from the tree.
5403 * See LOG_INODE_ALL and related defines for a description of what inode_only does.
5406 * This handles both files and directories.
5408 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5409 struct btrfs_root *root, struct btrfs_inode *inode,
5411 struct btrfs_log_ctx *ctx)
5413 struct btrfs_path *path;
5414 struct btrfs_path *dst_path;
5415 struct btrfs_key min_key;
5416 struct btrfs_key max_key;
5417 struct btrfs_root *log = root->log_root;
5420 bool fast_search = false;
5421 u64 ino = btrfs_ino(inode);
5422 struct extent_map_tree *em_tree = &inode->extent_tree;
5423 u64 logged_isize = 0;
5424 bool need_log_inode_item = true;
5425 bool xattrs_logged = false;
5426 bool recursive_logging = false;
5427 bool inode_item_dropped = true;
5429 path = btrfs_alloc_path();
5432 dst_path = btrfs_alloc_path();
5434 btrfs_free_path(path);
5438 min_key.objectid = ino;
5439 min_key.type = BTRFS_INODE_ITEM_KEY;
5442 max_key.objectid = ino;
5445 /* today the code can only do partial logging of directories */
5446 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5447 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5448 &inode->runtime_flags) &&
5449 inode_only >= LOG_INODE_EXISTS))
5450 max_key.type = BTRFS_XATTR_ITEM_KEY;
5452 max_key.type = (u8)-1;
5453 max_key.offset = (u64)-1;
5456 * Only run delayed items if we are a directory. We want to make sure
5457 * all directory indexes hit the fs/subvolume tree so we can find them
5458 * and figure out which index ranges have to be logged.
5460 * Otherwise commit the delayed inode only if the full sync flag is set,
5461 * as we want to make sure an up to date version is in the subvolume
5462 * tree so copy_inode_items_to_log() / copy_items() can find it and copy
5463 * it to the log tree. For a non full sync, we always log the inode item
5464 * based on the in-memory struct btrfs_inode which is always up to date.
5466 if (S_ISDIR(inode->vfs_inode.i_mode))
5467 ret = btrfs_commit_inode_delayed_items(trans, inode);
5468 else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5469 ret = btrfs_commit_inode_delayed_inode(inode);
5472 btrfs_free_path(path);
5473 btrfs_free_path(dst_path);
5477 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5478 recursive_logging = true;
5479 if (inode_only == LOG_OTHER_INODE)
5480 inode_only = LOG_INODE_EXISTS;
5482 inode_only = LOG_INODE_ALL;
5483 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5485 mutex_lock(&inode->log_mutex);
5489 * For symlinks, we must always log their content, which is stored in an
5490 * inline extent, otherwise we could end up with an empty symlink after
5491 * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
5492 * one attempts to create an empty symlink).
5493	 * We don't need to worry about flushing delalloc, because the inline
5494	 * extent is created at the time the symlink is created (we never have delalloc
5497 if (S_ISLNK(inode->vfs_inode.i_mode))
5498 inode_only = LOG_INODE_ALL;
5501	 * This is for cases where logging a directory could result in losing
5502	 * a file after replaying the log. For example, if we move a file from a
5503	 * directory A to a directory B, then fsync directory A, we have no way
5504	 * to know the file was moved from A to B, so logging just A would
5505	 * result in losing the file after a log replay.
5507 if (S_ISDIR(inode->vfs_inode.i_mode) &&
5508 inode_only == LOG_INODE_ALL &&
5509 inode->last_unlink_trans >= trans->transid) {
5510 btrfs_set_log_full_commit(trans);
5516 * a brute force approach to making sure we get the most uptodate
5517 * copies of everything.
5519 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5520 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5522 clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
5523 if (inode_only == LOG_INODE_EXISTS)
5524 max_key_type = BTRFS_XATTR_ITEM_KEY;
5525 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5527 if (inode_only == LOG_INODE_EXISTS) {
5529 * Make sure the new inode item we write to the log has
5530 * the same isize as the current one (if it exists).
5531 * This is necessary to prevent data loss after log
5532 * replay, and also to prevent doing a wrong expanding
5533 * truncate - for e.g. create file, write 4K into offset
5534 * 0, fsync, write 4K into offset 4096, add hard link,
5535 * fsync some other file (to sync log), power fail - if
5536 * we use the inode's current i_size, after log replay
5537			 * we get an 8Kb file, with the last 4Kb extent as a hole
5538 * (zeroes), as if an expanding truncate happened,
5539 * instead of getting a file of 4Kb only.
5541 err = logged_inode_size(log, inode, path, &logged_isize);
5545 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5546 &inode->runtime_flags)) {
5547 if (inode_only == LOG_INODE_EXISTS) {
5548 max_key.type = BTRFS_XATTR_ITEM_KEY;
5549 ret = drop_objectid_items(trans, log, path, ino,
5552 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5553 &inode->runtime_flags);
5554 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5555 &inode->runtime_flags);
5557 ret = btrfs_truncate_inode_items(trans,
5558 log, inode, 0, 0, NULL);
5563 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5564 &inode->runtime_flags) ||
5565 inode_only == LOG_INODE_EXISTS) {
5566 if (inode_only == LOG_INODE_ALL)
5568 max_key.type = BTRFS_XATTR_ITEM_KEY;
5569 ret = drop_objectid_items(trans, log, path, ino,
5572 if (inode_only == LOG_INODE_ALL)
5574 inode_item_dropped = false;
5584 err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5585 path, dst_path, logged_isize,
5586 recursive_logging, inode_only, ctx,
5587 &need_log_inode_item);
5591 btrfs_release_path(path);
5592 btrfs_release_path(dst_path);
5593 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5596 xattrs_logged = true;
5597 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5598 btrfs_release_path(path);
5599 btrfs_release_path(dst_path);
5600 err = btrfs_log_holes(trans, root, inode, path);
5605 btrfs_release_path(path);
5606 btrfs_release_path(dst_path);
5607 if (need_log_inode_item) {
5608 err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
5612 * If we are doing a fast fsync and the inode was logged before
5613 * in this transaction, we don't need to log the xattrs because
5614 * they were logged before. If xattrs were added, changed or
5615 * deleted since the last time we logged the inode, then we have
5616 * already logged them because the inode had the runtime flag
5617 * BTRFS_INODE_COPY_EVERYTHING set.
5619 if (!xattrs_logged && inode->logged_trans < trans->transid) {
5620 err = btrfs_log_all_xattrs(trans, root, inode, path,
5624 btrfs_release_path(path);
5628 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5634 } else if (inode_only == LOG_INODE_ALL) {
5635 struct extent_map *em, *n;
5637 write_lock(&em_tree->lock);
5638 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
5639 list_del_init(&em->list);
5640 write_unlock(&em_tree->lock);
5643 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5644 ret = log_directory_changes(trans, root, inode, path, dst_path,
5653 * If we are logging that an ancestor inode exists as part of logging a
5654 * new name from a link or rename operation, don't mark the inode as
5655 * logged - otherwise if an explicit fsync is made against an ancestor,
5656 * the fsync considers the inode in the log and doesn't sync the log,
5657 * resulting in the ancestor missing after a power failure unless the
5658 * log was synced as part of an fsync against any other unrelated inode.
5659	 * So keep it simple for this case and just don't flag the ancestors as logged.
5663 !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name &&
5664 &inode->vfs_inode != ctx->inode)) {
5665 spin_lock(&inode->lock);
5666 inode->logged_trans = trans->transid;
5668 * Don't update last_log_commit if we logged that an inode exists.
5669 * We do this for two reasons:
5671 * 1) We might have had buffered writes to this inode that were
5672 * flushed and had their ordered extents completed in this
5673 * transaction, but we did not previously log the inode with
5674 * LOG_INODE_ALL. Later the inode was evicted and after that
5675 * it was loaded again and this LOG_INODE_EXISTS log operation
5676 * happened. We must make sure that if an explicit fsync against
5677 * the inode is performed later, it logs the new extents, an
5678 * updated inode item, etc, and syncs the log. The same logic
5679 * applies to direct IO writes instead of buffered writes.
5681 * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
5682 * is logged with an i_size of 0 or whatever value was logged
5683 * before. If later the i_size of the inode is increased by a
5684 * truncate operation, the log is synced through an fsync of
5685 * some other inode and then finally an explicit fsync against
5686 * this inode is made, we must make sure this fsync logs the
5687 * inode with the new i_size, the hole between old i_size and
5688 * the new i_size, and syncs the log.
5690 if (inode_only != LOG_INODE_EXISTS)
5691 inode->last_log_commit = inode->last_sub_trans;
5692 spin_unlock(&inode->lock);
5695 mutex_unlock(&inode->log_mutex);
5697 btrfs_free_path(path);
5698 btrfs_free_path(dst_path);
5703 * Check if we need to log an inode. This is used in contexts where while
5704 * logging an inode we need to log another inode (either that it exists or in
5705 * full mode). This is used instead of btrfs_inode_in_log() because the latter
5706 * requires the inode to be in the log and have the log transaction committed,
5707 * while here we do not care if the log transaction was already committed - our
5708 * caller will commit the log later - and we want to avoid logging an inode
5709 * multiple times when multiple tasks have joined the same log transaction.
5711 static bool need_log_inode(struct btrfs_trans_handle *trans,
5712 struct btrfs_inode *inode)
5715 * If a directory was not modified, no dentries added or removed, we can
5716 * and should avoid logging it.
5718 if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
5722 * If this inode does not have new/updated/deleted xattrs since the last
5723 * time it was logged and is flagged as logged in the current transaction,
5724 * we can skip logging it. As for new/deleted names, those are updated in
5725 * the log by link/unlink/rename operations.
5726 * In case the inode was logged and then evicted and reloaded, its
5727 * logged_trans will be 0, in which case we have to fully log it since
5728 * logged_trans is a transient field, not persisted.
5730 if (inode->logged_trans == trans->transid &&
5731 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5737 struct btrfs_dir_list {
5739 struct list_head list;
5743 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5744 * details about why it is needed.
5745 * This is a recursive operation - if an existing dentry corresponds to a
5746 * directory, that directory's new entries are logged too (same behaviour as
5747 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5748 * the dentries point to, we do not lock their i_mutex; otherwise lockdep
5749 * complains about the following circular lock dependency / possible deadlock:
5753 * lock(&type->i_mutex_dir_key#3/2);
5754 * lock(sb_internal#2);
5755 * lock(&type->i_mutex_dir_key#3/2);
5756 * lock(&sb->s_type->i_mutex_key#14);
5758 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5759 * sb_start_intwrite() in btrfs_start_transaction().
5760 * Not locking i_mutex of the inodes is still safe because:
5762 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5763 * that while logging the inode new references (names) are added or removed
5764 * from the inode, leaving the logged inode item with a link count that does
5765 * not match the number of logged inode reference items. This is fine because
5766 * at log replay time we compute the real number of links and correct the
5767 * link count in the inode item (see replay_one_buffer() and
5768 * link_to_fixup_dir());
5770 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5771 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5772 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5773 * has a size that doesn't match the sum of the lengths of all the logged
5774 * names. This does not result in a problem because if a dir_item key is
5775 * logged but its matching dir_index key is not logged, at log replay time we
5776 * don't use it to replay the respective name (see replay_one_name()). On the
5777 * other hand if only the dir_index key ends up being logged, the respective
5778 * name is added to the fs/subvol tree with both the dir_item and dir_index
5779 * keys created (see replay_one_name()).
5780 * The directory's inode item with a wrong i_size is not a problem as well,
5781 * since we don't use it at log replay time to set the i_size in the inode
5782 * item of the fs/subvol tree (see overwrite_item()).
5784 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5785 struct btrfs_root *root,
5786 struct btrfs_inode *start_inode,
5787 struct btrfs_log_ctx *ctx)
5789 struct btrfs_fs_info *fs_info = root->fs_info;
5790 struct btrfs_root *log = root->log_root;
5791 struct btrfs_path *path;
5792 LIST_HEAD(dir_list);
5793 struct btrfs_dir_list *dir_elem;
5796 path = btrfs_alloc_path();
5800 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5802 btrfs_free_path(path);
5805 dir_elem->ino = btrfs_ino(start_inode);
5806 list_add_tail(&dir_elem->list, &dir_list);
5808 while (!list_empty(&dir_list)) {
5809 struct extent_buffer *leaf;
5810 struct btrfs_key min_key;
5814 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5817 goto next_dir_inode;
5819 min_key.objectid = dir_elem->ino;
5820 min_key.type = BTRFS_DIR_ITEM_KEY;
5823 btrfs_release_path(path);
5824 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5826 goto next_dir_inode;
5827 } else if (ret > 0) {
5829 goto next_dir_inode;
5833 leaf = path->nodes[0];
5834 nritems = btrfs_header_nritems(leaf);
5835 for (i = path->slots[0]; i < nritems; i++) {
5836 struct btrfs_dir_item *di;
5837 struct btrfs_key di_key;
5838 struct inode *di_inode;
5839 struct btrfs_dir_list *new_dir_elem;
5840 int log_mode = LOG_INODE_EXISTS;
5843 btrfs_item_key_to_cpu(leaf, &min_key, i);
5844 if (min_key.objectid != dir_elem->ino ||
5845 min_key.type != BTRFS_DIR_ITEM_KEY)
5846 goto next_dir_inode;
5848 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5849 type = btrfs_dir_type(leaf, di);
5850 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5851 type != BTRFS_FT_DIR)
5853 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5854 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5857 btrfs_release_path(path);
5858 di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
5859 if (IS_ERR(di_inode)) {
5860 ret = PTR_ERR(di_inode);
5861 goto next_dir_inode;
5864 if (!need_log_inode(trans, BTRFS_I(di_inode))) {
5865 btrfs_add_delayed_iput(di_inode);
5869 ctx->log_new_dentries = false;
5870 if (type == BTRFS_FT_DIR)
5871 log_mode = LOG_INODE_ALL;
5872 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5874 btrfs_add_delayed_iput(di_inode);
5876 goto next_dir_inode;
5877 if (ctx->log_new_dentries) {
5878 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5880 if (!new_dir_elem) {
5882 goto next_dir_inode;
5884 new_dir_elem->ino = di_key.objectid;
5885 list_add_tail(&new_dir_elem->list, &dir_list);
5890 ret = btrfs_next_leaf(log, path);
5892 goto next_dir_inode;
5893 } else if (ret > 0) {
5895 goto next_dir_inode;
5899 if (min_key.offset < (u64)-1) {
5904 list_del(&dir_elem->list);
5908 btrfs_free_path(path);
5912 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5913 struct btrfs_inode *inode,
5914 struct btrfs_log_ctx *ctx)
5916 struct btrfs_fs_info *fs_info = trans->fs_info;
5918 struct btrfs_path *path;
5919 struct btrfs_key key;
5920 struct btrfs_root *root = inode->root;
5921 const u64 ino = btrfs_ino(inode);
5923 path = btrfs_alloc_path();
5926 path->skip_locking = 1;
5927 path->search_commit_root = 1;
5930 key.type = BTRFS_INODE_REF_KEY;
5932 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5937 struct extent_buffer *leaf = path->nodes[0];
5938 int slot = path->slots[0];
5943 if (slot >= btrfs_header_nritems(leaf)) {
5944 ret = btrfs_next_leaf(root, path);
5952 btrfs_item_key_to_cpu(leaf, &key, slot);
5953 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5954 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5957 item_size = btrfs_item_size_nr(leaf, slot);
5958 ptr = btrfs_item_ptr_offset(leaf, slot);
5959 while (cur_offset < item_size) {
5960 struct btrfs_key inode_key;
5961 struct inode *dir_inode;
5963 inode_key.type = BTRFS_INODE_ITEM_KEY;
5964 inode_key.offset = 0;
5966 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5967 struct btrfs_inode_extref *extref;
5969 extref = (struct btrfs_inode_extref *)
5971 inode_key.objectid = btrfs_inode_extref_parent(
5973 cur_offset += sizeof(*extref);
5974 cur_offset += btrfs_inode_extref_name_len(leaf,
5977 inode_key.objectid = key.offset;
5978 cur_offset = item_size;
5981 dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
5984			 * If the parent inode was deleted, return an error to
5985			 * fall back to a transaction commit. This is to prevent
5986			 * an inode that was moved from one parent A to
5987			 * a parent B, had its former parent A deleted and then
5988			 * got fsync'ed, from existing at both parents after
5989			 * a log replay (with the old parent still existing).
5996 * mv /mnt/B/bar /mnt/A/bar
5997 * mv -T /mnt/A /mnt/B
6001 * If we ignore the old parent B which got deleted,
6002 * after a log replay we would have file bar linked
6003			 * at both parents and the old parent B would still exist.
6006 if (IS_ERR(dir_inode)) {
6007 ret = PTR_ERR(dir_inode);
6011 if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
6012 btrfs_add_delayed_iput(dir_inode);
6017 ctx->log_new_dentries = false;
6018 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
6019 LOG_INODE_ALL, ctx);
6020 if (!ret && ctx && ctx->log_new_dentries)
6021 ret = log_new_dir_dentries(trans, root,
6022 BTRFS_I(dir_inode), ctx);
6023 btrfs_add_delayed_iput(dir_inode);
6031 btrfs_free_path(path);
6035 static int log_new_ancestors(struct btrfs_trans_handle *trans,
6036 struct btrfs_root *root,
6037 struct btrfs_path *path,
6038 struct btrfs_log_ctx *ctx)
6040 struct btrfs_key found_key;
6042 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
6045 struct btrfs_fs_info *fs_info = root->fs_info;
6046 struct extent_buffer *leaf = path->nodes[0];
6047 int slot = path->slots[0];
6048 struct btrfs_key search_key;
6049 struct inode *inode;
6053 btrfs_release_path(path);
6055 ino = found_key.offset;
6057 search_key.objectid = found_key.offset;
6058 search_key.type = BTRFS_INODE_ITEM_KEY;
6059 search_key.offset = 0;
6060 inode = btrfs_iget(fs_info->sb, ino, root);
6062 return PTR_ERR(inode);
6064 if (BTRFS_I(inode)->generation >= trans->transid &&
6065 need_log_inode(trans, BTRFS_I(inode)))
6066 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
6067 LOG_INODE_EXISTS, ctx);
6068 btrfs_add_delayed_iput(inode);
6072 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
6075 search_key.type = BTRFS_INODE_REF_KEY;
6076 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6080 leaf = path->nodes[0];
6081 slot = path->slots[0];
6082 if (slot >= btrfs_header_nritems(leaf)) {
6083 ret = btrfs_next_leaf(root, path);
6088 leaf = path->nodes[0];
6089 slot = path->slots[0];
6092 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6093 if (found_key.objectid != search_key.objectid ||
6094 found_key.type != BTRFS_INODE_REF_KEY)
6100 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
6101 struct btrfs_inode *inode,
6102 struct dentry *parent,
6103 struct btrfs_log_ctx *ctx)
6105 struct btrfs_root *root = inode->root;
6106 struct dentry *old_parent = NULL;
6107 struct super_block *sb = inode->vfs_inode.i_sb;
6111 if (!parent || d_really_is_negative(parent) ||
6115 inode = BTRFS_I(d_inode(parent));
6116 if (root != inode->root)
6119 if (inode->generation >= trans->transid &&
6120 need_log_inode(trans, inode)) {
6121 ret = btrfs_log_inode(trans, root, inode,
6122 LOG_INODE_EXISTS, ctx);
6126 if (IS_ROOT(parent))
6129 parent = dget_parent(parent);
6131 old_parent = parent;
6138 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
6139 struct btrfs_inode *inode,
6140 struct dentry *parent,
6141 struct btrfs_log_ctx *ctx)
6143 struct btrfs_root *root = inode->root;
6144 const u64 ino = btrfs_ino(inode);
6145 struct btrfs_path *path;
6146 struct btrfs_key search_key;
6150 * For a single hard link case, go through a fast path that does not
6151 * need to iterate the fs/subvolume tree.
6153 if (inode->vfs_inode.i_nlink < 2)
6154 return log_new_ancestors_fast(trans, inode, parent, ctx);
6156 path = btrfs_alloc_path();
6160 search_key.objectid = ino;
6161 search_key.type = BTRFS_INODE_REF_KEY;
6162 search_key.offset = 0;
6164 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6171 struct extent_buffer *leaf = path->nodes[0];
6172 int slot = path->slots[0];
6173 struct btrfs_key found_key;
6175 if (slot >= btrfs_header_nritems(leaf)) {
6176 ret = btrfs_next_leaf(root, path);
6184 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6185 if (found_key.objectid != ino ||
6186 found_key.type > BTRFS_INODE_EXTREF_KEY)
6190 * Don't deal with extended references because they are rare
6191 * cases and too complex to deal with (we would need to keep
6192 * track of which subitem we are processing for each item in
6193		 * this loop, etc). So just return some error to fall back to
6194 * a transaction commit.
6196 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6202 * Logging ancestors needs to do more searches on the fs/subvol
6203 * tree, so it releases the path as needed to avoid deadlocks.
6204 * Keep track of the last inode ref key and resume from that key
6205 * after logging all new ancestors for the current hard link.
6207 memcpy(&search_key, &found_key, sizeof(search_key));
6209 ret = log_new_ancestors(trans, root, path, ctx);
6212 btrfs_release_path(path);
6217 btrfs_free_path(path);
6222 * helper function around btrfs_log_inode() to make sure newly created
6223 * parent directories also end up in the log. Only minimal, inode-and-backref
6224 * logging is done for any parent directories that are older than
6225 * the last committed transaction.
6227 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6228 struct btrfs_inode *inode,
6229 struct dentry *parent,
6231 struct btrfs_log_ctx *ctx)
6233 struct btrfs_root *root = inode->root;
6234 struct btrfs_fs_info *fs_info = root->fs_info;
6236 bool log_dentries = false;
6238 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6243 if (btrfs_root_refs(&root->root_item) == 0) {
6249 * Skip already logged inodes or inodes corresponding to tmpfiles
6250 * (since logging them is pointless, a link count of 0 means they
6251 * will never be accessible).
6253 if ((btrfs_inode_in_log(inode, trans->transid) &&
6254 list_empty(&ctx->ordered_extents)) ||
6255 inode->vfs_inode.i_nlink == 0) {
6256 ret = BTRFS_NO_LOG_SYNC;
6260 ret = start_log_trans(trans, root, ctx);
6264 ret = btrfs_log_inode(trans, root, inode, inode_only, ctx);
6269	 * for a regular file, if its inode is already on disk, we don't
6270 * have to worry about the parents at all. This is because
6271 * we can use the last_unlink_trans field to record renames
6272 * and other fun in this file.
6274 if (S_ISREG(inode->vfs_inode.i_mode) &&
6275 inode->generation < trans->transid &&
6276 inode->last_unlink_trans < trans->transid) {
6281 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6282 log_dentries = true;
6285 * On unlink we must make sure all our current and old parent directory
6286 * inodes are fully logged. This is to prevent leaving dangling
6287 * directory index entries in directories that were our parents but are
6288	 * not anymore. Not doing this results in the old parent directory being
6289 * impossible to delete after log replay (rmdir will always fail with
6290 * error -ENOTEMPTY).
6296 * ln testdir/foo testdir/bar
6298 * unlink testdir/bar
6299 * xfs_io -c fsync testdir/foo
6301 * mount fs, triggers log replay
6303 * If we don't log the parent directory (testdir), after log replay the
6304 * directory still has an entry pointing to the file inode using the bar
6305 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6306 * the file inode has a link count of 1.
6312 * ln foo testdir/foo2
6313 * ln foo testdir/foo3
6315 * unlink testdir/foo3
6316 * xfs_io -c fsync foo
6318 * mount fs, triggers log replay
6320 * Similar as the first example, after log replay the parent directory
6321	 * testdir still has an entry pointing to the file inode with name foo3
6322 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6323 * and has a link count of 2.
6325 if (inode->last_unlink_trans >= trans->transid) {
6326 ret = btrfs_log_all_parents(trans, inode, ctx);
6331 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6336 ret = log_new_dir_dentries(trans, root, inode, ctx);
6341 btrfs_set_log_full_commit(trans);
6346 btrfs_remove_log_ctx(root, ctx);
6347 btrfs_end_log_trans(root);
6353 * it is not safe to log a dentry if the chunk root has added new
6354 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6355 * If this returns 1, you must commit the transaction to safely get your data on disk.
6358 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6359 struct dentry *dentry,
6360 struct btrfs_log_ctx *ctx)
6362 struct dentry *parent = dget_parent(dentry);
6365 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6366 LOG_INODE_ALL, ctx);
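/*
 * Illustration only (not part of the original file): the shape of a caller.
 * btrfs_sync_file() consumes the return value roughly like this (heavily
 * simplified - BTRFS_NO_LOG_SYNC handling, error paths, flushing and locking
 * are all omitted): sync just the log tree on 0, fall back to a full
 * transaction commit otherwise.
 */
static int fsync_sketch(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct dentry *dentry,
			struct btrfs_log_ctx *ctx)
{
	int ret = btrfs_log_dentry_safe(trans, dentry, ctx);

	if (ret == 0) {
		/* Everything was logged: syncing the log tree is enough. */
		ret = btrfs_sync_log(trans, root, ctx);
		if (ret == 0)
			return btrfs_end_transaction(trans);
	}
	/* Logging was unsafe or failed: fall back to a full commit. */
	return btrfs_commit_transaction(trans);
}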
6373 * should be called during mount to recover and replay any log trees
6376 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6379 struct btrfs_path *path;
6380 struct btrfs_trans_handle *trans;
6381 struct btrfs_key key;
6382 struct btrfs_key found_key;
6383 struct btrfs_root *log;
6384 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6385 struct walk_control wc = {
6386 .process_func = process_one_buffer,
6387 .stage = LOG_WALK_PIN_ONLY,
6390 path = btrfs_alloc_path();
6394 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6396 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6397 if (IS_ERR(trans)) {
6398 ret = PTR_ERR(trans);
6405 ret = walk_log_tree(trans, log_root_tree, &wc);
6407 btrfs_handle_fs_error(fs_info, ret,
6408 "Failed to pin buffers while recovering log root tree.");
6413 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6414 key.offset = (u64)-1;
6415 key.type = BTRFS_ROOT_ITEM_KEY;
6418 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6421 btrfs_handle_fs_error(fs_info, ret,
6422 "Couldn't find tree log root.");
6426 if (path->slots[0] == 0)
6430 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6432 btrfs_release_path(path);
6433 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6436 log = btrfs_read_tree_root(log_root_tree, &found_key);
6439 btrfs_handle_fs_error(fs_info, ret,
6440 "Couldn't read tree log root.");
6444 wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
6446 if (IS_ERR(wc.replay_dest)) {
6447 ret = PTR_ERR(wc.replay_dest);
6450 * We didn't find the subvol, likely because it was
6451			 * deleted. This is ok, simply skip this log and go to the next one.
6454 * We need to exclude the root because we can't have
6455 * other log replays overwriting this log as we'll read
6456 * it back in a few more times. This will keep our
6457 * block from being modified, and we'll just bail for
6458 * each subsequent pass.
6461 ret = btrfs_pin_extent_for_log_replay(trans,
6464 btrfs_put_root(log);
6468 btrfs_handle_fs_error(fs_info, ret,
6469 "Couldn't read target root for tree log recovery.");
6473 wc.replay_dest->log_root = log;
6474 ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
6476 /* The loop needs to continue due to the root refs */
6477 btrfs_handle_fs_error(fs_info, ret,
6478 "failed to record the log root in transaction");
6480 ret = walk_log_tree(trans, log, &wc);
6482 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6483 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6487 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6488 struct btrfs_root *root = wc.replay_dest;
6490 btrfs_release_path(path);
6493			 * We have just replayed everything, and the highest
6494			 * objectid of fs roots probably has changed in case
6495			 * some inode items got replayed.
6497			 * root->objectid_mutex is not acquired as log replay
6498			 * can only happen during mount.
6500 ret = btrfs_init_root_free_objectid(root);
6503 wc.replay_dest->log_root = NULL;
6504 btrfs_put_root(wc.replay_dest);
6505 btrfs_put_root(log);
6510 if (found_key.offset == 0)
6512 key.offset = found_key.offset - 1;
6514 btrfs_release_path(path);
6516 /* step one is to pin it all, step two is to replay just inodes */
6519 wc.process_func = replay_one_buffer;
6520 wc.stage = LOG_WALK_REPLAY_INODES;
6523 /* step three is to replay everything */
6524 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6529 btrfs_free_path(path);
6531 /* step 4: commit the transaction, which also unpins the blocks */
6532 ret = btrfs_commit_transaction(trans);
6536 log_root_tree->log_root = NULL;
6537 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6538 btrfs_put_root(log_root_tree);
6543 btrfs_end_transaction(wc.trans);
6544 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6545 btrfs_free_path(path);
6550 * there are some corner cases where we want to force a full
6551 * commit instead of allowing a directory to be logged.
6553 * They revolve around files that were unlinked from the directory, and
6554 * this function updates the parent directory so that a full commit is
6555 * properly done if it is fsync'd later after the unlinks are done.
6557 * Must be called before the unlink operations (updates to the subvolume tree,
6558 * inodes, etc) are done.
6560 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6561 struct btrfs_inode *dir, struct btrfs_inode *inode,
6565 * when we're logging a file, if it hasn't been renamed
6566 * or unlinked, and its inode is fully committed on disk,
6567 * we don't have to worry about walking up the directory chain
6568 * to log its parents.
6570 * So, we use the last_unlink_trans field to put this transid
6571 * into the file. When the file is logged we check it and
6572 * don't log the parents if the file is fully on disk.
6574 mutex_lock(&inode->log_mutex);
6575 inode->last_unlink_trans = trans->transid;
6576 mutex_unlock(&inode->log_mutex);
6579	 * if this directory was already logged, any new
6580	 * names for this file/dir will get recorded
6582 if (dir->logged_trans == trans->transid)
6586 * if the inode we're about to unlink was logged,
6587 * the log will be properly updated for any new names
6589 if (inode->logged_trans == trans->transid)
6593 * when renaming files across directories, if the directory
6594	 * we're unlinking from gets fsync'd later on, there's
6595 * no way to find the destination directory later and fsync it
6596 * properly. So, we have to be conservative and force commits
6597 * so the new name gets discovered.
6602 /* we can safely do the unlink without any special recording */
6606 mutex_lock(&dir->log_mutex);
6607 dir->last_unlink_trans = trans->transid;
6608 mutex_unlock(&dir->log_mutex);
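/*
 * Illustration only (not part of the original file): callers must record the
 * unlink before updating the subvolume tree, while last_unlink_trans and
 * logged_trans still reflect the pre-unlink state. Roughly (the trailing
 * for_rename argument follows the upstream signature; the declaration above
 * is truncated in this listing):
 */
static void record_then_unlink_sketch(struct btrfs_trans_handle *trans,
				      struct btrfs_inode *dir,
				      struct btrfs_inode *inode)
{
	/* Step 1: record, so a later fsync of 'dir' forces a full commit. */
	btrfs_record_unlink_dir(trans, dir, inode, 0 /* not a rename */);

	/*
	 * Step 2: only now remove the dir item/index and inode ref from the
	 * subvolume tree (btrfs_unlink_inode() in the real unlink path).
	 */
}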
6612 * Make sure that if someone attempts to fsync the parent directory of a deleted
6613 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6614 * that after replaying the log tree of the parent directory's root we will not
6615 * see the snapshot anymore and at log replay time we will not see any log tree
6616 * corresponding to the deleted snapshot's root, which could lead to replaying
6617 * it after replaying the log tree of the parent directory (which would replay
6618 * the snapshot delete operation).
6620 * Must be called before the actual snapshot destroy operation (updates to the
6621 * parent root and tree of tree roots trees, etc) are done.
6623 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6624 struct btrfs_inode *dir)
6626 mutex_lock(&dir->log_mutex);
6627 dir->last_unlink_trans = trans->transid;
6628 mutex_unlock(&dir->log_mutex);
6632 * Update the log after adding a new name for an inode.
6634 * @trans: Transaction handle.
6635 * @old_dentry: The dentry associated with the old name and the old
6637 * @old_dir: The inode of the previous parent directory for the case
6638 * of a rename. For a link operation, it must be NULL.
6639 * @parent: The dentry associated with the directory under which the
6640 * new name is located.
6642 * Call this after adding a new name for an inode, as a result of a link or
6643 * rename operation, and it will properly update the log to reflect the new name.
6645 void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6646 struct dentry *old_dentry, struct btrfs_inode *old_dir,
6647 struct dentry *parent)
6649 struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry));
6650 struct btrfs_log_ctx ctx;
6653	 * this will force the logging code to walk the dentry chain up to the root.
6656 if (!S_ISDIR(inode->vfs_inode.i_mode))
6657 inode->last_unlink_trans = trans->transid;
6660	 * if this inode hasn't been logged and the directory we're renaming it
6661	 * from hasn't been logged either, we don't need to log it
6663 if (!inode_logged(trans, inode) &&
6664 (!old_dir || !inode_logged(trans, old_dir)))
6668 * If we are doing a rename (old_dir is not NULL) from a directory that
6669 * was previously logged, make sure the next log attempt on the directory
6670 * is not skipped and logs the inode again. This is because the log may
6671 * not currently be authoritative for a range including the old
6672 * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
6673 * sure after a log replay we do not end up with both the new and old
6674 * dentries around (in case the inode is a directory we would have a
6675 * directory with two hard links and 2 inode references for different
6676 * parents). The next log attempt of old_dir will happen at
6677 * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
6678 * below, because we have previously set inode->last_unlink_trans to the
6679 * current transaction ID, either here or at btrfs_record_unlink_dir() in
6680 * case inode is a directory.
6683 old_dir->logged_trans = 0;
6685 btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
6686 ctx.logging_new_name = true;
6688 * We don't care about the return value. If we fail to log the new name
6690	 * then we know the next attempt to sync the log will fall back to a full
6690 * transaction commit (due to a call to btrfs_set_log_full_commit()), so
6691 * we don't need to worry about getting a log committed that has an
6692 * inconsistent state after a rename operation.
6694 btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
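/*
 * Illustration only (not part of the original file): how a rename path would
 * use this, per the kernel-doc above (for a link operation, old_dir would be
 * NULL instead). The real caller is btrfs_rename() in inode.c; everything
 * else in that path is elided here.
 */
static void rename_tail_sketch(struct btrfs_trans_handle *trans,
			       struct dentry *old_dentry,
			       struct btrfs_inode *old_dir,
			       struct dentry *new_dentry)
{
	/* ... dir items already moved in the subvolume tree ... */
	btrfs_log_new_name(trans, old_dentry, old_dir, new_dentry->d_parent);
}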