2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/list_sort.h>
26 #include "print-tree.h"
29 #include "compression.h"
32 /* magic values for the inode_only field in btrfs_log_inode:
34 * LOG_INODE_ALL means to log everything
35 * LOG_INODE_EXISTS means to log just enough to recreate the inode
38 #define LOG_INODE_ALL 0
39 #define LOG_INODE_EXISTS 1
40 #define LOG_OTHER_INODE 2
43 * directory trouble cases
45 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
46 * log, we must force a full commit before doing an fsync of the directory
47 * where the unlink was done.
48 * ---> record transid of last unlink/rename per directory
52 * rename foo/some_dir foo2/some_dir
54 * fsync foo/some_dir/some_file
56 * The fsync above will unlink the original some_dir without recording
57 * it in its new location (foo2). After a crash, some_dir will be gone
58 * unless the fsync of some_file forces a full commit
60 * 2) we must log any new names for any file or dir that is in the fsync
61 * log. ---> check inode while renaming/linking.
63 * 2a) we must log any new names for any file or dir during rename
64 * when the directory they are being removed from was logged.
65 * ---> check inode and old parent dir during rename
67 * 2a is actually the more important variant. Without the extra logging
68 * a crash might unlink the old name without recreating the new one
70 * 3) after a crash, we must go through any directories with a link count
71 * of zero and redo the rm -rf
78 * The directory f1 was fully removed from the FS, but fsync was never
79 * called on f1, only its parent dir. After a crash the rm -rf must
80 * be replayed. This must be able to recurse down the entire
81 * directory tree. The inode link count fixup code takes care of the ugly details.
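/*
 * Editorial illustration, not part of the original file: a minimal userspace
 * sketch of trouble case 1 above, using only POSIX calls (path names made up,
 * error handling omitted). The unlinked inode is not in the fsync log, so the
 * later fsync of the directory has to force a full transaction commit for the
 * unlink to survive a crash.
 */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static void trouble_case_1_sketch(void)
{
	int fd, dirfd;

	mkdir("dir", 0755);
	fd = open("dir/file", O_CREAT | O_WRONLY, 0644);
	close(fd);
	sync();			/* full commit, "dir/file" is on disk */

	unlink("dir/file");	/* the unlinked inode is not in the fsync log */

	dirfd = open("dir", O_RDONLY);
	fsync(dirfd);		/* fsync of the directory where the unlink was
				 * done: per case 1 this must force a full
				 * commit so the unlink is not lost */
	close(dirfd);
}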
86 * stages for the tree walking. The first
87 * stage (0) is to only pin down the blocks we find.
88 * The second stage (1) is to make sure that all the inodes
89 * we find in the log are created in the subvolume.
91 * The last stage is to deal with directories and links and extents
92 * and all the other fun semantics
94 #define LOG_WALK_PIN_ONLY 0
95 #define LOG_WALK_REPLAY_INODES 1
96 #define LOG_WALK_REPLAY_DIR_INDEX 2
97 #define LOG_WALK_REPLAY_ALL 3
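/*
 * Editorial sketch, not part of the original file: the stages above are
 * stepped through in order during log replay. The real driver is
 * btrfs_recover_log_trees(), which carries a struct walk_control and full
 * error handling; this only spells out what each stage is responsible for.
 */
static void __maybe_unused log_walk_stages_sketch(void)
{
	int stage;

	for (stage = LOG_WALK_PIN_ONLY; stage <= LOG_WALK_REPLAY_ALL; stage++) {
		switch (stage) {
		case LOG_WALK_PIN_ONLY:
			/* pin down every extent the log tree references */
			break;
		case LOG_WALK_REPLAY_INODES:
			/* create all inodes found in the log in the subvolume */
			break;
		case LOG_WALK_REPLAY_DIR_INDEX:
			/* replay directory index items */
			break;
		case LOG_WALK_REPLAY_ALL:
			/* links, extents, xattrs and the other fun semantics */
			break;
		}
	}
}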
99 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
100 struct btrfs_root *root, struct btrfs_inode *inode,
104 struct btrfs_log_ctx *ctx);
105 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
106 struct btrfs_root *root,
107 struct btrfs_path *path, u64 objectid);
108 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
109 struct btrfs_root *root,
110 struct btrfs_root *log,
111 struct btrfs_path *path,
112 u64 dirid, int del_all);
115 * tree logging is a special write ahead log used to make sure that
116 * fsyncs and O_SYNCs can happen without doing full tree commits.
118 * Full tree commits are expensive because they require commonly
119 * modified blocks to be recowed, creating many dirty pages in the
120 * extent tree and a 4x-6x higher write load than ext3.
122 * Instead of doing a tree commit on every fsync, we use the
123 * key ranges and transaction ids to find items for a given file or directory
124 * that have changed in this transaction. Those items are copied into
125 * a special tree (one per subvolume root), that tree is written to disk
126 * and then the fsync is considered complete.
128 * After a crash, items are copied out of the log-tree back into the
129 * subvolume tree. Any file data extents found are recorded in the extent
130 * allocation tree, and the log-tree freed.
132 * The log tree is read three times: once to pin down all the extents it is
133 * using in ram, once to create all the inodes logged in the tree,
134 * and once to do all the other items.
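/*
 * Editorial sketch, not part of the original file, of the "key ranges"
 * mentioned above: every item belonging to one inode shares the same
 * key.objectid, so the candidates for logging that inode live in the key
 * range below. The helper name is made up; the real search and copy logic
 * is in btrfs_log_inode() and friends.
 */
static void __maybe_unused inode_key_range_sketch(u64 ino)
{
	struct btrfs_key min_key = { .objectid = ino, .type = 0, .offset = 0 };
	struct btrfs_key max_key = {
		.objectid = ino,
		.type = (u8)-1,
		.offset = (u64)-1,
	};

	/*
	 * walk the subvolume tree from min_key to max_key and copy items
	 * changed in the current transaction into the per-subvolume log tree
	 */
	(void)min_key;
	(void)max_key;
}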
138 * start a sub transaction and set up the log tree;
139 * this increments the log tree writer count to make the people
140 * syncing the tree wait for us to finish
142 static int start_log_trans(struct btrfs_trans_handle *trans,
143 struct btrfs_root *root,
144 struct btrfs_log_ctx *ctx)
146 struct btrfs_fs_info *fs_info = root->fs_info;
149 mutex_lock(&root->log_mutex);
151 if (root->log_root) {
152 if (btrfs_need_log_full_commit(fs_info, trans)) {
157 if (!root->log_start_pid) {
158 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
159 root->log_start_pid = current->pid;
160 } else if (root->log_start_pid != current->pid) {
161 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
164 mutex_lock(&fs_info->tree_log_mutex);
165 if (!fs_info->log_root_tree)
166 ret = btrfs_init_log_root_tree(trans, fs_info);
167 mutex_unlock(&fs_info->tree_log_mutex);
171 ret = btrfs_add_log_tree(trans, root);
175 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
176 root->log_start_pid = current->pid;
179 atomic_inc(&root->log_batch);
180 atomic_inc(&root->log_writers);
182 int index = root->log_transid % 2;
183 list_add_tail(&ctx->list, &root->log_ctxs[index]);
184 ctx->log_transid = root->log_transid;
188 mutex_unlock(&root->log_mutex);
193 * returns 0 if there was a log transaction running and we were able
194 * to join, or returns -ENOENT if there was no transaction in progress
197 static int join_running_log_trans(struct btrfs_root *root)
205 mutex_lock(&root->log_mutex);
206 if (root->log_root) {
208 atomic_inc(&root->log_writers);
210 mutex_unlock(&root->log_mutex);
215 * This either makes the current running log transaction wait
216 * until you call btrfs_end_log_trans() or it makes any future
217 * log transactions wait until you call btrfs_end_log_trans()
219 int btrfs_pin_log_trans(struct btrfs_root *root)
223 mutex_lock(&root->log_mutex);
224 atomic_inc(&root->log_writers);
225 mutex_unlock(&root->log_mutex);
230 * indicate we're done making changes to the log tree
231 * and wake up anyone waiting to do a sync
233 void btrfs_end_log_trans(struct btrfs_root *root)
235 if (atomic_dec_and_test(&root->log_writers)) {
237 * Implicit memory barrier after atomic_dec_and_test
239 if (waitqueue_active(&root->log_writer_wait))
240 wake_up(&root->log_writer_wait);
246 * the walk control struct is used to pass state down the chain when
247 * processing the log tree. The stage field tells us which part
248 * of the log tree processing we are currently doing. The others
249 * are state fields used for that specific part
251 struct walk_control {
252 /* should we free the extent on disk when done? This is used
253 * at transaction commit time while freeing a log tree
257 /* should we write out the extent buffer? This is used
258 * while flushing the log tree to disk during a sync
262 /* should we wait for the extent buffer io to finish? Also used
263 * while flushing the log tree to disk for a sync
267 /* pin only walk, we record which extents on disk belong to the log trees
272 /* what stage of the replay code we're currently in */
275 /* the root we are currently replaying */
276 struct btrfs_root *replay_dest;
278 /* the trans handle for the current replay */
279 struct btrfs_trans_handle *trans;
281 /* the function that gets used to process blocks we find in the
282 * tree. Note the extent_buffer might not be up to date when it is
283 * passed in, and it must be checked or read if you need the data
286 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
287 struct walk_control *wc, u64 gen);
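/*
 * Editorial sketch, not part of the original file: how the struct above is
 * typically filled for the pin-only pass at log replay time, using
 * process_one_buffer() below as the callback. The real setup lives in
 * btrfs_recover_log_trees() (later in this file) and adds error handling
 * plus the later replay stages.
 */
static void __maybe_unused pin_only_walk_sketch(struct btrfs_trans_handle *trans,
						struct btrfs_root *log)
{
	struct walk_control wc = {
		.pin = 1,		/* record which extents the log uses */
		.stage = LOG_WALK_PIN_ONLY,
		.trans = trans,
		.process_func = process_one_buffer,
	};

	walk_log_tree(trans, log, &wc);
}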
291 * process_func used to pin down extents, write them or wait on them
293 static int process_one_buffer(struct btrfs_root *log,
294 struct extent_buffer *eb,
295 struct walk_control *wc, u64 gen)
297 struct btrfs_fs_info *fs_info = log->fs_info;
301 * If this fs is mixed then we need to be able to process the leaves to
302 * pin down any logged extents, so we have to read the block.
304 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
305 ret = btrfs_read_buffer(eb, gen);
311 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
314 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
315 if (wc->pin && btrfs_header_level(eb) == 0)
316 ret = btrfs_exclude_logged_extents(fs_info, eb);
318 btrfs_write_tree_block(eb);
320 btrfs_wait_tree_block_writeback(eb);
326 * Item overwrite used by replay and tree logging. eb, slot and key all refer
327 * to the src data we are copying out.
329 * root is the tree we are copying into, and path is a scratch
330 * path for use in this function (it should be released on entry and
331 * will be released on exit).
333 * If the key is already in the destination tree the existing item is
334 * overwritten. If the existing item isn't big enough, it is extended.
335 * If it is too large, it is truncated.
337 * If the key isn't in the destination yet, a new item is inserted.
339 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
340 struct btrfs_root *root,
341 struct btrfs_path *path,
342 struct extent_buffer *eb, int slot,
343 struct btrfs_key *key)
345 struct btrfs_fs_info *fs_info = root->fs_info;
348 u64 saved_i_size = 0;
349 int save_old_i_size = 0;
350 unsigned long src_ptr;
351 unsigned long dst_ptr;
352 int overwrite_root = 0;
353 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
355 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
358 item_size = btrfs_item_size_nr(eb, slot);
359 src_ptr = btrfs_item_ptr_offset(eb, slot);
361 /* look for the key in the destination tree */
362 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
369 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
371 if (dst_size != item_size)
374 if (item_size == 0) {
375 btrfs_release_path(path);
378 dst_copy = kmalloc(item_size, GFP_NOFS);
379 src_copy = kmalloc(item_size, GFP_NOFS);
380 if (!dst_copy || !src_copy) {
381 btrfs_release_path(path);
387 read_extent_buffer(eb, src_copy, src_ptr, item_size);
389 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
390 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
392 ret = memcmp(dst_copy, src_copy, item_size);
397 * they have the same contents, just return, this saves
398 * us from cowing blocks in the destination tree and doing
399 * extra writes that may not have been done by a previous sync
403 btrfs_release_path(path);
408 * We need to load the old nbytes into the inode so when we
409 * replay the extents we've logged we get the right nbytes.
412 struct btrfs_inode_item *item;
416 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
417 struct btrfs_inode_item);
418 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
419 item = btrfs_item_ptr(eb, slot,
420 struct btrfs_inode_item);
421 btrfs_set_inode_nbytes(eb, item, nbytes);
424 * If this is a directory we need to reset the i_size to
425 * 0 so that we can set it up properly when replaying
426 * the rest of the items in this log.
428 mode = btrfs_inode_mode(eb, item);
430 btrfs_set_inode_size(eb, item, 0);
432 } else if (inode_item) {
433 struct btrfs_inode_item *item;
437 * New inode, set nbytes to 0 so that the nbytes comes out
438 * properly when we replay the extents.
440 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
441 btrfs_set_inode_nbytes(eb, item, 0);
444 * If this is a directory we need to reset the i_size to 0 so
445 * that we can set it up properly when replaying the rest of
446 * the items in this log.
448 mode = btrfs_inode_mode(eb, item);
450 btrfs_set_inode_size(eb, item, 0);
453 btrfs_release_path(path);
454 /* try to insert the key into the destination tree */
455 path->skip_release_on_error = 1;
456 ret = btrfs_insert_empty_item(trans, root, path,
458 path->skip_release_on_error = 0;
460 /* make sure any existing item is the correct size */
461 if (ret == -EEXIST || ret == -EOVERFLOW) {
463 found_size = btrfs_item_size_nr(path->nodes[0],
465 if (found_size > item_size)
466 btrfs_truncate_item(fs_info, path, item_size, 1);
467 else if (found_size < item_size)
468 btrfs_extend_item(fs_info, path,
469 item_size - found_size);
473 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
476 /* don't overwrite an existing inode if the generation number
477 * was logged as zero. This is done when the tree logging code
478 * is just logging an inode to make sure it exists after recovery.
480 * Also, don't overwrite i_size on directories during replay.
481 * log replay inserts and removes directory items based on the
482 * state of the tree found in the subvolume, and i_size is modified as it goes.
485 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
486 struct btrfs_inode_item *src_item;
487 struct btrfs_inode_item *dst_item;
489 src_item = (struct btrfs_inode_item *)src_ptr;
490 dst_item = (struct btrfs_inode_item *)dst_ptr;
492 if (btrfs_inode_generation(eb, src_item) == 0) {
493 struct extent_buffer *dst_eb = path->nodes[0];
494 const u64 ino_size = btrfs_inode_size(eb, src_item);
497 * For regular files an ino_size == 0 is used only when
498 * logging that an inode exists, as part of a directory
499 * fsync, and the inode wasn't fsynced before. In this
500 * case don't set the size of the inode in the fs/subvol
501 * tree, otherwise we would be throwing valid data away.
503 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
504 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
506 struct btrfs_map_token token;
508 btrfs_init_map_token(&token);
509 btrfs_set_token_inode_size(dst_eb, dst_item,
515 if (overwrite_root &&
516 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
517 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
519 saved_i_size = btrfs_inode_size(path->nodes[0],
524 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
527 if (save_old_i_size) {
528 struct btrfs_inode_item *dst_item;
529 dst_item = (struct btrfs_inode_item *)dst_ptr;
530 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
533 /* make sure the generation is filled in */
534 if (key->type == BTRFS_INODE_ITEM_KEY) {
535 struct btrfs_inode_item *dst_item;
536 dst_item = (struct btrfs_inode_item *)dst_ptr;
537 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
538 btrfs_set_inode_generation(path->nodes[0], dst_item,
543 btrfs_mark_buffer_dirty(path->nodes[0]);
544 btrfs_release_path(path);
549 * simple helper to read an inode off the disk from a given root
550 * This can only be called for subvolume roots and not for the log
552 static noinline struct inode *read_one_inode(struct btrfs_root *root,
555 struct btrfs_key key;
558 key.objectid = objectid;
559 key.type = BTRFS_INODE_ITEM_KEY;
561 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
564 } else if (is_bad_inode(inode)) {
571 /* replays a single extent in 'eb' at 'slot' with 'key' into the
572 * subvolume 'root'. path is released on entry and should be released on exit.
575 * extents in the log tree have not been allocated out of the extent
576 * tree yet. So, this completes the allocation, taking a reference
577 * as required if the extent already exists or creating a new extent
578 * if it isn't in the extent allocation tree yet.
580 * The extent is inserted into the file, dropping any existing extents
581 * from the file that overlap the new one.
583 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
584 struct btrfs_root *root,
585 struct btrfs_path *path,
586 struct extent_buffer *eb, int slot,
587 struct btrfs_key *key)
589 struct btrfs_fs_info *fs_info = root->fs_info;
592 u64 start = key->offset;
594 struct btrfs_file_extent_item *item;
595 struct inode *inode = NULL;
599 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
600 found_type = btrfs_file_extent_type(eb, item);
602 if (found_type == BTRFS_FILE_EXTENT_REG ||
603 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
604 nbytes = btrfs_file_extent_num_bytes(eb, item);
605 extent_end = start + nbytes;
608 * We don't add to the inode's nbytes if we are prealloc or a hole.
611 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
613 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
614 size = btrfs_file_extent_inline_len(eb, slot, item);
615 nbytes = btrfs_file_extent_ram_bytes(eb, item);
616 extent_end = ALIGN(start + size,
617 fs_info->sectorsize);
623 inode = read_one_inode(root, key->objectid);
630 * first check to see if we already have this extent in the
631 * file. This must be done before the btrfs_drop_extents run
632 * so we don't try to drop this extent.
634 ret = btrfs_lookup_file_extent(trans, root, path,
635 btrfs_ino(BTRFS_I(inode)), start, 0);
638 (found_type == BTRFS_FILE_EXTENT_REG ||
639 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
640 struct btrfs_file_extent_item cmp1;
641 struct btrfs_file_extent_item cmp2;
642 struct btrfs_file_extent_item *existing;
643 struct extent_buffer *leaf;
645 leaf = path->nodes[0];
646 existing = btrfs_item_ptr(leaf, path->slots[0],
647 struct btrfs_file_extent_item);
649 read_extent_buffer(eb, &cmp1, (unsigned long)item,
651 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
655 * we already have a pointer to this exact extent,
656 * we don't have to do anything
658 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
659 btrfs_release_path(path);
663 btrfs_release_path(path);
665 /* drop any overlapping extents */
666 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
670 if (found_type == BTRFS_FILE_EXTENT_REG ||
671 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
673 unsigned long dest_offset;
674 struct btrfs_key ins;
676 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
677 btrfs_fs_incompat(fs_info, NO_HOLES))
680 ret = btrfs_insert_empty_item(trans, root, path, key,
684 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
686 copy_extent_buffer(path->nodes[0], eb, dest_offset,
687 (unsigned long)item, sizeof(*item));
689 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
690 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
691 ins.type = BTRFS_EXTENT_ITEM_KEY;
692 offset = key->offset - btrfs_file_extent_offset(eb, item);
695 * Manually record the dirty extent, as here we did a shallow
696 * file extent item copy and skipped the normal backref update,
697 * modifying the extent tree all by ourselves.
698 * So we need to manually record the dirty extent for qgroup,
699 * as the owner of the file extent changed from the log tree
700 * (doesn't affect qgroup) to the fs/file tree (affects qgroup).
702 ret = btrfs_qgroup_trace_extent(trans, fs_info,
703 btrfs_file_extent_disk_bytenr(eb, item),
704 btrfs_file_extent_disk_num_bytes(eb, item),
709 if (ins.objectid > 0) {
712 LIST_HEAD(ordered_sums);
714 * is this extent already allocated in the extent
715 * allocation tree? If so, just add a reference
717 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
720 ret = btrfs_inc_extent_ref(trans, fs_info,
721 ins.objectid, ins.offset,
722 0, root->root_key.objectid,
723 key->objectid, offset);
728 * insert the extent pointer in the extent allocation tree
731 ret = btrfs_alloc_logged_file_extent(trans,
733 root->root_key.objectid,
734 key->objectid, offset, &ins);
738 btrfs_release_path(path);
740 if (btrfs_file_extent_compression(eb, item)) {
741 csum_start = ins.objectid;
742 csum_end = csum_start + ins.offset;
744 csum_start = ins.objectid +
745 btrfs_file_extent_offset(eb, item);
746 csum_end = csum_start +
747 btrfs_file_extent_num_bytes(eb, item);
750 ret = btrfs_lookup_csums_range(root->log_root,
751 csum_start, csum_end - 1,
756 * Now delete all existing csums in the csum root that
757 * cover our range. We do this because we can have an
758 * extent that is completely referenced by one file
759 * extent item and partially referenced by another
760 * file extent item (like after using the clone or
761 * extent_same ioctls). In this case if we end up doing
762 * the replay of the one that partially references the
763 * extent first, and we do not do the csum deletion
764 * below, we can get 2 csum items in the csum tree that
765 * overlap each other. For example, imagine our log has
766 * the two following file extent items:
768 * key (257 EXTENT_DATA 409600)
769 * extent data disk byte 12845056 nr 102400
770 * extent data offset 20480 nr 20480 ram 102400
772 * key (257 EXTENT_DATA 819200)
773 * extent data disk byte 12845056 nr 102400
774 * extent data offset 0 nr 102400 ram 102400
776 * Where the second one fully references the 100K extent
777 * that starts at disk byte 12845056, and the log tree
778 * has a single csum item that covers the entire range
781 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
783 * After the first file extent item is replayed, the
784 * csum tree gets the following csum item:
786 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
788 * Which covers the 20K sub-range starting at offset 20K
789 * of our extent. Now when we replay the second file
790 * extent item, if we do not delete existing csum items
791 * that cover any of its blocks, we end up getting two
792 * csum items in our csum tree that overlap each other:
794 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
795 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
797 * Which is a problem, because after this anyone trying
798 * to look up the checksum of any block of our
799 * extent starting at an offset of 40K or higher, will
800 * end up looking at the second csum item only, which
801 * does not contain the checksum for any block starting
802 * at offset 40K or higher of our extent.
804 while (!list_empty(&ordered_sums)) {
805 struct btrfs_ordered_sum *sums;
806 sums = list_entry(ordered_sums.next,
807 struct btrfs_ordered_sum,
810 ret = btrfs_del_csums(trans, fs_info,
814 ret = btrfs_csum_file_blocks(trans,
815 fs_info->csum_root, sums);
816 list_del(&sums->list);
822 btrfs_release_path(path);
824 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
825 /* inline extents are easy, we just overwrite them */
826 ret = overwrite_item(trans, root, path, eb, slot, key);
831 inode_add_bytes(inode, nbytes);
833 ret = btrfs_update_inode(trans, root, inode);
841 * when cleaning up conflicts between the directory names in the
842 * subvolume, directory names in the log and directory names in the
843 * inode back references, we may have to unlink inodes from directories.
845 * This is a helper function to do the unlink of a specific directory item.
848 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
849 struct btrfs_root *root,
850 struct btrfs_path *path,
851 struct btrfs_inode *dir,
852 struct btrfs_dir_item *di)
854 struct btrfs_fs_info *fs_info = root->fs_info;
858 struct extent_buffer *leaf;
859 struct btrfs_key location;
862 leaf = path->nodes[0];
864 btrfs_dir_item_key_to_cpu(leaf, di, &location);
865 name_len = btrfs_dir_name_len(leaf, di);
866 name = kmalloc(name_len, GFP_NOFS);
870 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
871 btrfs_release_path(path);
873 inode = read_one_inode(root, location.objectid);
879 ret = link_to_fixup_dir(trans, root, path, location.objectid);
883 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
888 ret = btrfs_run_delayed_items(trans, fs_info);
896 * helper function to see if a given name and sequence number found
897 * in an inode back reference are already in a directory and correctly
898 * point to this inode
900 static noinline int inode_in_dir(struct btrfs_root *root,
901 struct btrfs_path *path,
902 u64 dirid, u64 objectid, u64 index,
903 const char *name, int name_len)
905 struct btrfs_dir_item *di;
906 struct btrfs_key location;
909 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
910 index, name, name_len, 0);
911 if (di && !IS_ERR(di)) {
912 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
913 if (location.objectid != objectid)
917 btrfs_release_path(path);
919 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
920 if (di && !IS_ERR(di)) {
921 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
922 if (location.objectid != objectid)
928 btrfs_release_path(path);
933 * helper function to check a log tree for a named back reference in
934 * an inode. This is used to decide if a back reference that is
935 * found in the subvolume conflicts with what we find in the log.
937 * inode backreferences may have multiple refs in a single item,
938 * during replay we process one reference at a time, and we don't
939 * want to delete valid links to a file from the subvolume if that
940 * link is also in the log.
942 static noinline int backref_in_log(struct btrfs_root *log,
943 struct btrfs_key *key,
945 const char *name, int namelen)
947 struct btrfs_path *path;
948 struct btrfs_inode_ref *ref;
950 unsigned long ptr_end;
951 unsigned long name_ptr;
957 path = btrfs_alloc_path();
961 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
965 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
967 if (key->type == BTRFS_INODE_EXTREF_KEY) {
968 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
969 name, namelen, NULL))
975 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
976 ptr_end = ptr + item_size;
977 while (ptr < ptr_end) {
978 ref = (struct btrfs_inode_ref *)ptr;
979 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
980 if (found_name_len == namelen) {
981 name_ptr = (unsigned long)(ref + 1);
982 ret = memcmp_extent_buffer(path->nodes[0], name,
989 ptr = (unsigned long)(ref + 1) + found_name_len;
992 btrfs_free_path(path);
996 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
997 struct btrfs_root *root,
998 struct btrfs_path *path,
999 struct btrfs_root *log_root,
1000 struct btrfs_inode *dir,
1001 struct btrfs_inode *inode,
1002 u64 inode_objectid, u64 parent_objectid,
1003 u64 ref_index, char *name, int namelen,
1006 struct btrfs_fs_info *fs_info = root->fs_info;
1009 int victim_name_len;
1010 struct extent_buffer *leaf;
1011 struct btrfs_dir_item *di;
1012 struct btrfs_key search_key;
1013 struct btrfs_inode_extref *extref;
1016 /* Search old style refs */
1017 search_key.objectid = inode_objectid;
1018 search_key.type = BTRFS_INODE_REF_KEY;
1019 search_key.offset = parent_objectid;
1020 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1022 struct btrfs_inode_ref *victim_ref;
1024 unsigned long ptr_end;
1026 leaf = path->nodes[0];
1028 /* are we trying to overwrite a back ref for the root directory?
1029 * If so, just jump out, we're done
1031 if (search_key.objectid == search_key.offset)
1034 /* check all the names in this back reference to see
1035 * if they are in the log. If so, we allow them to stay;
1036 * otherwise they must be unlinked as a conflict
1038 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1039 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1040 while (ptr < ptr_end) {
1041 victim_ref = (struct btrfs_inode_ref *)ptr;
1042 victim_name_len = btrfs_inode_ref_name_len(leaf,
1044 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1048 read_extent_buffer(leaf, victim_name,
1049 (unsigned long)(victim_ref + 1),
1052 if (!backref_in_log(log_root, &search_key,
1056 inc_nlink(&inode->vfs_inode);
1057 btrfs_release_path(path);
1059 ret = btrfs_unlink_inode(trans, root, dir, inode,
1060 victim_name, victim_name_len);
1064 ret = btrfs_run_delayed_items(trans, fs_info);
1072 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1076 * NOTE: we have searched the root tree and checked the
1077 * corresponding ref, so it does not need to be checked again.
1081 btrfs_release_path(path);
1083 /* Same search but for extended refs */
1084 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1085 inode_objectid, parent_objectid, 0,
1087 if (!IS_ERR_OR_NULL(extref)) {
1091 struct inode *victim_parent;
1093 leaf = path->nodes[0];
1095 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1096 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1098 while (cur_offset < item_size) {
1099 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1101 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1103 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1106 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1109 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1112 search_key.objectid = inode_objectid;
1113 search_key.type = BTRFS_INODE_EXTREF_KEY;
1114 search_key.offset = btrfs_extref_hash(parent_objectid,
1118 if (!backref_in_log(log_root, &search_key,
1119 parent_objectid, victim_name,
1122 victim_parent = read_one_inode(root,
1124 if (victim_parent) {
1125 inc_nlink(&inode->vfs_inode);
1126 btrfs_release_path(path);
1128 ret = btrfs_unlink_inode(trans, root,
1129 BTRFS_I(victim_parent),
1134 ret = btrfs_run_delayed_items(
1138 iput(victim_parent);
1149 cur_offset += victim_name_len + sizeof(*extref);
1153 btrfs_release_path(path);
1155 /* look for a conflicting sequence number */
1156 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1157 ref_index, name, namelen, 0);
1158 if (di && !IS_ERR(di)) {
1159 ret = drop_one_dir_item(trans, root, path, dir, di);
1163 btrfs_release_path(path);
1165 /* look for a conflicting name */
1166 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1168 if (di && !IS_ERR(di)) {
1169 ret = drop_one_dir_item(trans, root, path, dir, di);
1173 btrfs_release_path(path);
1178 static int extref_get_fields(struct extent_buffer *eb, int slot,
1179 unsigned long ref_ptr, u32 *namelen, char **name,
1180 u64 *index, u64 *parent_objectid)
1182 struct btrfs_inode_extref *extref;
1184 extref = (struct btrfs_inode_extref *)ref_ptr;
1186 *namelen = btrfs_inode_extref_name_len(eb, extref);
1187 if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
1191 *name = kmalloc(*namelen, GFP_NOFS);
1195 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1198 *index = btrfs_inode_extref_index(eb, extref);
1199 if (parent_objectid)
1200 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1205 static int ref_get_fields(struct extent_buffer *eb, int slot,
1206 unsigned long ref_ptr, u32 *namelen, char **name,
1209 struct btrfs_inode_ref *ref;
1211 ref = (struct btrfs_inode_ref *)ref_ptr;
1213 *namelen = btrfs_inode_ref_name_len(eb, ref);
1214 if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
1218 *name = kmalloc(*namelen, GFP_NOFS);
1222 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1224 *index = btrfs_inode_ref_index(eb, ref);
1230 * replay one inode back reference item found in the log tree.
1231 * eb, slot and key refer to the buffer and key found in the log tree.
1232 * root is the destination we are replaying into, and path is for temp
1233 * use by this function. (it should be released on return).
1235 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1236 struct btrfs_root *root,
1237 struct btrfs_root *log,
1238 struct btrfs_path *path,
1239 struct extent_buffer *eb, int slot,
1240 struct btrfs_key *key)
1242 struct inode *dir = NULL;
1243 struct inode *inode = NULL;
1244 unsigned long ref_ptr;
1245 unsigned long ref_end;
1249 int search_done = 0;
1250 int log_ref_ver = 0;
1251 u64 parent_objectid;
1254 int ref_struct_size;
1256 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1257 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1259 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1260 struct btrfs_inode_extref *r;
1262 ref_struct_size = sizeof(struct btrfs_inode_extref);
1264 r = (struct btrfs_inode_extref *)ref_ptr;
1265 parent_objectid = btrfs_inode_extref_parent(eb, r);
1267 ref_struct_size = sizeof(struct btrfs_inode_ref);
1268 parent_objectid = key->offset;
1270 inode_objectid = key->objectid;
1273 * it is possible that we didn't log all the parent directories
1274 * for a given inode. If we don't find the dir, just don't
1275 * copy the back ref in. The link count fixup code will take care of the rest.
1278 dir = read_one_inode(root, parent_objectid);
1284 inode = read_one_inode(root, inode_objectid);
1290 while (ref_ptr < ref_end) {
1292 ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
1293 &name, &ref_index, &parent_objectid);
1295 * parent object can change from one array index to another in the same item.
1299 dir = read_one_inode(root, parent_objectid);
1305 ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
1311 /* if we already have a perfect match, we're done */
1312 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1313 btrfs_ino(BTRFS_I(inode)), ref_index,
1316 * look for a conflicting back reference in the
1317 * metadata. if we find one we have to unlink that name
1318 * of the file before we add our new link. Later on, we
1319 * overwrite any existing back reference, and we don't
1320 * want to create dangling pointers in the directory.
1324 ret = __add_inode_ref(trans, root, path, log,
1329 ref_index, name, namelen,
1338 /* insert our name */
1339 ret = btrfs_add_link(trans, BTRFS_I(dir),
1341 name, namelen, 0, ref_index);
1345 btrfs_update_inode(trans, root, inode);
1348 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1357 /* finally write the back reference in the inode */
1358 ret = overwrite_item(trans, root, path, eb, slot, key);
1360 btrfs_release_path(path);
1367 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1368 struct btrfs_root *root, u64 ino)
1372 ret = btrfs_insert_orphan_item(trans, root, ino);
1379 static int count_inode_extrefs(struct btrfs_root *root,
1380 struct btrfs_inode *inode, struct btrfs_path *path)
1384 unsigned int nlink = 0;
1387 u64 inode_objectid = btrfs_ino(inode);
1390 struct btrfs_inode_extref *extref;
1391 struct extent_buffer *leaf;
1394 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1399 leaf = path->nodes[0];
1400 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1401 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1404 while (cur_offset < item_size) {
1405 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1406 name_len = btrfs_inode_extref_name_len(leaf, extref);
1410 cur_offset += name_len + sizeof(*extref);
1414 btrfs_release_path(path);
1416 btrfs_release_path(path);
1418 if (ret < 0 && ret != -ENOENT)
1423 static int count_inode_refs(struct btrfs_root *root,
1424 struct btrfs_inode *inode, struct btrfs_path *path)
1427 struct btrfs_key key;
1428 unsigned int nlink = 0;
1430 unsigned long ptr_end;
1432 u64 ino = btrfs_ino(inode);
1435 key.type = BTRFS_INODE_REF_KEY;
1436 key.offset = (u64)-1;
1439 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1443 if (path->slots[0] == 0)
1448 btrfs_item_key_to_cpu(path->nodes[0], &key,
1450 if (key.objectid != ino ||
1451 key.type != BTRFS_INODE_REF_KEY)
1453 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1454 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1456 while (ptr < ptr_end) {
1457 struct btrfs_inode_ref *ref;
1459 ref = (struct btrfs_inode_ref *)ptr;
1460 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1462 ptr = (unsigned long)(ref + 1) + name_len;
1466 if (key.offset == 0)
1468 if (path->slots[0] > 0) {
1473 btrfs_release_path(path);
1475 btrfs_release_path(path);
1481 * There are a few corners where the link count of the file can't
1482 * be properly maintained during replay. So, instead of adding
1483 * lots of complexity to the log code, we just scan the backrefs
1484 * for any file that has been through replay.
1486 * The scan will update the link count on the inode to reflect the
1487 * number of back refs found. If it goes down to zero, the iput
1488 * will free the inode.
1490 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1491 struct btrfs_root *root,
1492 struct inode *inode)
1494 struct btrfs_path *path;
1497 u64 ino = btrfs_ino(BTRFS_I(inode));
1499 path = btrfs_alloc_path();
1503 ret = count_inode_refs(root, BTRFS_I(inode), path);
1509 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1517 if (nlink != inode->i_nlink) {
1518 set_nlink(inode, nlink);
1519 btrfs_update_inode(trans, root, inode);
1521 BTRFS_I(inode)->index_cnt = (u64)-1;
1523 if (inode->i_nlink == 0) {
1524 if (S_ISDIR(inode->i_mode)) {
1525 ret = replay_dir_deletes(trans, root, NULL, path,
1530 ret = insert_orphan_item(trans, root, ino);
1534 btrfs_free_path(path);
1538 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1539 struct btrfs_root *root,
1540 struct btrfs_path *path)
1543 struct btrfs_key key;
1544 struct inode *inode;
1546 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1547 key.type = BTRFS_ORPHAN_ITEM_KEY;
1548 key.offset = (u64)-1;
1550 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1555 if (path->slots[0] == 0)
1560 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1561 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1562 key.type != BTRFS_ORPHAN_ITEM_KEY)
1565 ret = btrfs_del_item(trans, root, path);
1569 btrfs_release_path(path);
1570 inode = read_one_inode(root, key.offset);
1574 ret = fixup_inode_link_count(trans, root, inode);
1580 * fixup on a directory may create new entries,
1581 * make sure we always look for the highest possible offset
1584 key.offset = (u64)-1;
1588 btrfs_release_path(path);
1594 * record a given inode in the fixup dir so we can check its link
1595 * count when replay is done. The link count is incremented here
1596 * so the inode won't go away until we check it
1598 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1599 struct btrfs_root *root,
1600 struct btrfs_path *path,
1603 struct btrfs_key key;
1605 struct inode *inode;
1607 inode = read_one_inode(root, objectid);
1611 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1612 key.type = BTRFS_ORPHAN_ITEM_KEY;
1613 key.offset = objectid;
1615 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1617 btrfs_release_path(path);
1619 if (!inode->i_nlink)
1620 set_nlink(inode, 1);
1623 ret = btrfs_update_inode(trans, root, inode);
1624 } else if (ret == -EEXIST) {
1627 BUG(); /* Logic Error */
1635 * when replaying the log for a directory, we only insert names
1636 * for inodes that actually exist. This means an fsync on a directory
1637 * does not implicitly fsync all the new files in it
1639 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1640 struct btrfs_root *root,
1641 u64 dirid, u64 index,
1642 char *name, int name_len,
1643 struct btrfs_key *location)
1645 struct inode *inode;
1649 inode = read_one_inode(root, location->objectid);
1653 dir = read_one_inode(root, dirid);
1659 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1660 name_len, 1, index);
1662 /* FIXME, put inode into FIXUP list */
1670 * Return true if an inode reference exists in the log for the given name,
1671 * inode and parent inode.
1673 static bool name_in_log_ref(struct btrfs_root *log_root,
1674 const char *name, const int name_len,
1675 const u64 dirid, const u64 ino)
1677 struct btrfs_key search_key;
1679 search_key.objectid = ino;
1680 search_key.type = BTRFS_INODE_REF_KEY;
1681 search_key.offset = dirid;
1682 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1685 search_key.type = BTRFS_INODE_EXTREF_KEY;
1686 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1687 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1694 * take a single entry in a log directory item and replay it into
1697 * if a conflicting item exists in the subdirectory already,
1698 * the inode it points to is unlinked and put into the link count fix up tree.
1701 * If a name from the log points to a file or directory that does
1702 * not exist in the FS, it is skipped. fsyncs on directories
1703 * do not force down inodes inside that directory, just changes to the
1704 * names or unlinks in a directory.
1706 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1707 * non-existing inode) and 1 if the name was replayed.
1709 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1710 struct btrfs_root *root,
1711 struct btrfs_path *path,
1712 struct extent_buffer *eb,
1713 struct btrfs_dir_item *di,
1714 struct btrfs_key *key)
1718 struct btrfs_dir_item *dst_di;
1719 struct btrfs_key found_key;
1720 struct btrfs_key log_key;
1725 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1726 bool name_added = false;
1728 dir = read_one_inode(root, key->objectid);
1732 name_len = btrfs_dir_name_len(eb, di);
1733 name = kmalloc(name_len, GFP_NOFS);
1739 log_type = btrfs_dir_type(eb, di);
1740 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1743 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1744 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1749 btrfs_release_path(path);
1751 if (key->type == BTRFS_DIR_ITEM_KEY) {
1752 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1754 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1755 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1764 if (IS_ERR_OR_NULL(dst_di)) {
1765 /* we need a sequence number to insert, so we only
1766 * do inserts for the BTRFS_DIR_INDEX_KEY types
1768 if (key->type != BTRFS_DIR_INDEX_KEY)
1773 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1774 /* the existing item matches the logged item */
1775 if (found_key.objectid == log_key.objectid &&
1776 found_key.type == log_key.type &&
1777 found_key.offset == log_key.offset &&
1778 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1779 update_size = false;
1784 * don't drop the conflicting directory entry if the inode
1785 * for the new entry doesn't exist
1790 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
1794 if (key->type == BTRFS_DIR_INDEX_KEY)
1797 btrfs_release_path(path);
1798 if (!ret && update_size) {
1799 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
1800 ret = btrfs_update_inode(trans, root, dir);
1804 if (!ret && name_added)
1809 if (name_in_log_ref(root->log_root, name, name_len,
1810 key->objectid, log_key.objectid)) {
1811 /* The dentry will be added later. */
1813 update_size = false;
1816 btrfs_release_path(path);
1817 ret = insert_one_name(trans, root, key->objectid, key->offset,
1818 name, name_len, &log_key);
1819 if (ret && ret != -ENOENT && ret != -EEXIST)
1823 update_size = false;
1829 * find all the names in a directory item and reconcile them into
1830 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1831 * one name in a directory item, but the same code gets used for
1832 * both directory index types
1834 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1835 struct btrfs_root *root,
1836 struct btrfs_path *path,
1837 struct extent_buffer *eb, int slot,
1838 struct btrfs_key *key)
1840 struct btrfs_fs_info *fs_info = root->fs_info;
1842 u32 item_size = btrfs_item_size_nr(eb, slot);
1843 struct btrfs_dir_item *di;
1846 unsigned long ptr_end;
1847 struct btrfs_path *fixup_path = NULL;
1849 ptr = btrfs_item_ptr_offset(eb, slot);
1850 ptr_end = ptr + item_size;
1851 while (ptr < ptr_end) {
1852 di = (struct btrfs_dir_item *)ptr;
1853 if (verify_dir_item(fs_info, eb, slot, di))
1855 name_len = btrfs_dir_name_len(eb, di);
1856 ret = replay_one_name(trans, root, path, eb, di, key);
1859 ptr = (unsigned long)(di + 1);
1863 * If this entry refers to a non-directory (directories can not
1864 * have a link count > 1) and it was added in the transaction
1865 * that was not committed, make sure we fixup the link count of
1866 * the inode the entry points to. Otherwise something like
1867 * the following would result in a directory pointing to an
1868 * inode with a wrong link count that does not account for this dir entry:
1876 * ln testdir/bar testdir/bar_link
1877 * ln testdir/foo testdir/foo_link
1878 * xfs_io -c "fsync" testdir/bar
1882 * mount fs, log replay happens
1884 * File foo would remain with a link count of 1 when it has two
1885 * entries pointing to it in the directory testdir. This would
1886 * make it impossible to ever delete the parent directory as
1887 * it would result in stale dentries that can never be deleted.
1889 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
1890 struct btrfs_key di_key;
1893 fixup_path = btrfs_alloc_path();
1900 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1901 ret = link_to_fixup_dir(trans, root, fixup_path,
1908 btrfs_free_path(fixup_path);
1913 * directory replay has two parts. There are the standard directory
1914 * items in the log copied from the subvolume, and range items
1915 * created in the log while the subvolume was logged.
1917 * The range items tell us which parts of the key space the log
1918 * is authoritative for. During replay, if a key in the subvolume
1919 * directory is in a logged range item, but not actually in the log
1920 * that means it was deleted from the directory before the fsync
1921 * and should be removed.
1923 static noinline int find_dir_range(struct btrfs_root *root,
1924 struct btrfs_path *path,
1925 u64 dirid, int key_type,
1926 u64 *start_ret, u64 *end_ret)
1928 struct btrfs_key key;
1930 struct btrfs_dir_log_item *item;
1934 if (*start_ret == (u64)-1)
1937 key.objectid = dirid;
1938 key.type = key_type;
1939 key.offset = *start_ret;
1941 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1945 if (path->slots[0] == 0)
1950 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1952 if (key.type != key_type || key.objectid != dirid) {
1956 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1957 struct btrfs_dir_log_item);
1958 found_end = btrfs_dir_log_end(path->nodes[0], item);
1960 if (*start_ret >= key.offset && *start_ret <= found_end) {
1962 *start_ret = key.offset;
1963 *end_ret = found_end;
1968 /* check the next slot in the tree to see if it is a valid item */
1969 nritems = btrfs_header_nritems(path->nodes[0]);
1971 if (path->slots[0] >= nritems) {
1972 ret = btrfs_next_leaf(root, path);
1977 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1979 if (key.type != key_type || key.objectid != dirid) {
1983 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1984 struct btrfs_dir_log_item);
1985 found_end = btrfs_dir_log_end(path->nodes[0], item);
1986 *start_ret = key.offset;
1987 *end_ret = found_end;
1990 btrfs_release_path(path);
1995 * this looks for a given directory item in the log. If the directory
1996 * item is not in the log, the item is removed and the inode it points to is unlinked.
1999 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2000 struct btrfs_root *root,
2001 struct btrfs_root *log,
2002 struct btrfs_path *path,
2003 struct btrfs_path *log_path,
2005 struct btrfs_key *dir_key)
2007 struct btrfs_fs_info *fs_info = root->fs_info;
2009 struct extent_buffer *eb;
2012 struct btrfs_dir_item *di;
2013 struct btrfs_dir_item *log_di;
2016 unsigned long ptr_end;
2018 struct inode *inode;
2019 struct btrfs_key location;
2022 eb = path->nodes[0];
2023 slot = path->slots[0];
2024 item_size = btrfs_item_size_nr(eb, slot);
2025 ptr = btrfs_item_ptr_offset(eb, slot);
2026 ptr_end = ptr + item_size;
2027 while (ptr < ptr_end) {
2028 di = (struct btrfs_dir_item *)ptr;
2029 if (verify_dir_item(fs_info, eb, slot, di)) {
2034 name_len = btrfs_dir_name_len(eb, di);
2035 name = kmalloc(name_len, GFP_NOFS);
2040 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2043 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2044 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2047 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2048 log_di = btrfs_lookup_dir_index_item(trans, log,
2054 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2055 btrfs_dir_item_key_to_cpu(eb, di, &location);
2056 btrfs_release_path(path);
2057 btrfs_release_path(log_path);
2058 inode = read_one_inode(root, location.objectid);
2064 ret = link_to_fixup_dir(trans, root,
2065 path, location.objectid);
2073 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2074 BTRFS_I(inode), name, name_len);
2076 ret = btrfs_run_delayed_items(trans, fs_info);
2082 /* there might still be more names under this key;
2083 * check and repeat if required
2085 ret = btrfs_search_slot(NULL, root, dir_key, path,
2091 } else if (IS_ERR(log_di)) {
2093 return PTR_ERR(log_di);
2095 btrfs_release_path(log_path);
2098 ptr = (unsigned long)(di + 1);
2103 btrfs_release_path(path);
2104 btrfs_release_path(log_path);
2108 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2109 struct btrfs_root *root,
2110 struct btrfs_root *log,
2111 struct btrfs_path *path,
2114 struct btrfs_key search_key;
2115 struct btrfs_path *log_path;
2120 log_path = btrfs_alloc_path();
2124 search_key.objectid = ino;
2125 search_key.type = BTRFS_XATTR_ITEM_KEY;
2126 search_key.offset = 0;
2128 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2132 nritems = btrfs_header_nritems(path->nodes[0]);
2133 for (i = path->slots[0]; i < nritems; i++) {
2134 struct btrfs_key key;
2135 struct btrfs_dir_item *di;
2136 struct btrfs_dir_item *log_di;
2140 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2141 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2146 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2147 total_size = btrfs_item_size_nr(path->nodes[0], i);
2149 while (cur < total_size) {
2150 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2151 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2152 u32 this_len = sizeof(*di) + name_len + data_len;
2155 name = kmalloc(name_len, GFP_NOFS);
2160 read_extent_buffer(path->nodes[0], name,
2161 (unsigned long)(di + 1), name_len);
2163 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2165 btrfs_release_path(log_path);
2167 /* Doesn't exist in log tree, so delete it. */
2168 btrfs_release_path(path);
2169 di = btrfs_lookup_xattr(trans, root, path, ino,
2170 name, name_len, -1);
2177 ret = btrfs_delete_one_dir_name(trans, root,
2181 btrfs_release_path(path);
2186 if (IS_ERR(log_di)) {
2187 ret = PTR_ERR(log_di);
2191 di = (struct btrfs_dir_item *)((char *)di + this_len);
2194 ret = btrfs_next_leaf(root, path);
2200 btrfs_free_path(log_path);
2201 btrfs_release_path(path);
2207 * deletion replay happens before we copy any new directory items
2208 * out of the log or out of backreferences from inodes. It
2209 * scans the log to find ranges of keys that the log is authoritative for,
2210 * and then scans the directory to find items in those ranges that are
2211 * not present in the log.
2213 * Anything we don't find in the log is unlinked and removed from the directory.
2216 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2217 struct btrfs_root *root,
2218 struct btrfs_root *log,
2219 struct btrfs_path *path,
2220 u64 dirid, int del_all)
2224 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2226 struct btrfs_key dir_key;
2227 struct btrfs_key found_key;
2228 struct btrfs_path *log_path;
2231 dir_key.objectid = dirid;
2232 dir_key.type = BTRFS_DIR_ITEM_KEY;
2233 log_path = btrfs_alloc_path();
2237 dir = read_one_inode(root, dirid);
2238 /* it isn't an error if the inode isn't there, that can happen
2239 * because we replay the deletes before we copy in the inode item
2243 btrfs_free_path(log_path);
2251 range_end = (u64)-1;
2253 ret = find_dir_range(log, path, dirid, key_type,
2254 &range_start, &range_end);
2259 dir_key.offset = range_start;
2262 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2267 nritems = btrfs_header_nritems(path->nodes[0]);
2268 if (path->slots[0] >= nritems) {
2269 ret = btrfs_next_leaf(root, path);
2273 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2275 if (found_key.objectid != dirid ||
2276 found_key.type != dir_key.type)
2279 if (found_key.offset > range_end)
2282 ret = check_item_in_log(trans, root, log, path,
2287 if (found_key.offset == (u64)-1)
2289 dir_key.offset = found_key.offset + 1;
2291 btrfs_release_path(path);
2292 if (range_end == (u64)-1)
2294 range_start = range_end + 1;
2299 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2300 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2301 dir_key.type = BTRFS_DIR_INDEX_KEY;
2302 btrfs_release_path(path);
2306 btrfs_release_path(path);
2307 btrfs_free_path(log_path);
2313 * the process_func used to replay items from the log tree. This
2314 * gets called in two different stages. The first stage just looks
2315 * for inodes and makes sure they are all copied into the subvolume.
2317 * The second stage copies all the other item types from the log into
2318 * the subvolume. The two stage approach is slower, but gets rid of
2319 * lots of complexity around inodes referencing other inodes that exist
2320 * only in the log (references come from either directory items or inode back refs).
2323 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2324 struct walk_control *wc, u64 gen)
2327 struct btrfs_path *path;
2328 struct btrfs_root *root = wc->replay_dest;
2329 struct btrfs_key key;
2334 ret = btrfs_read_buffer(eb, gen);
2338 level = btrfs_header_level(eb);
2343 path = btrfs_alloc_path();
2347 nritems = btrfs_header_nritems(eb);
2348 for (i = 0; i < nritems; i++) {
2349 btrfs_item_key_to_cpu(eb, &key, i);
2351 /* inode keys are done during the first stage */
2352 if (key.type == BTRFS_INODE_ITEM_KEY &&
2353 wc->stage == LOG_WALK_REPLAY_INODES) {
2354 struct btrfs_inode_item *inode_item;
2357 inode_item = btrfs_item_ptr(eb, i,
2358 struct btrfs_inode_item);
2359 ret = replay_xattr_deletes(wc->trans, root, log,
2360 path, key.objectid);
2363 mode = btrfs_inode_mode(eb, inode_item);
2364 if (S_ISDIR(mode)) {
2365 ret = replay_dir_deletes(wc->trans,
2366 root, log, path, key.objectid, 0);
2370 ret = overwrite_item(wc->trans, root, path,
2375 /* for regular files, make sure the corresponding
2376 * orphan item exists. Extents past the new EOF
2377 * will be truncated later by orphan cleanup.
2379 if (S_ISREG(mode)) {
2380 ret = insert_orphan_item(wc->trans, root,
2386 ret = link_to_fixup_dir(wc->trans, root,
2387 path, key.objectid);
2392 if (key.type == BTRFS_DIR_INDEX_KEY &&
2393 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2394 ret = replay_one_dir_item(wc->trans, root, path,
2400 if (wc->stage < LOG_WALK_REPLAY_ALL)
2403 /* these keys are simply copied */
2404 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2405 ret = overwrite_item(wc->trans, root, path,
2409 } else if (key.type == BTRFS_INODE_REF_KEY ||
2410 key.type == BTRFS_INODE_EXTREF_KEY) {
2411 ret = add_inode_ref(wc->trans, root, log, path,
2413 if (ret && ret != -ENOENT)
2416 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2417 ret = replay_one_extent(wc->trans, root, path,
2421 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2422 ret = replay_one_dir_item(wc->trans, root, path,
2428 btrfs_free_path(path);
2432 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2433 struct btrfs_root *root,
2434 struct btrfs_path *path, int *level,
2435 struct walk_control *wc)
2437 struct btrfs_fs_info *fs_info = root->fs_info;
2441 struct extent_buffer *next;
2442 struct extent_buffer *cur;
2443 struct extent_buffer *parent;
2447 WARN_ON(*level < 0);
2448 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2450 while (*level > 0) {
2451 WARN_ON(*level < 0);
2452 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2453 cur = path->nodes[*level];
2455 WARN_ON(btrfs_header_level(cur) != *level);
2457 if (path->slots[*level] >=
2458 btrfs_header_nritems(cur))
2461 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2462 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2463 blocksize = fs_info->nodesize;
2465 parent = path->nodes[*level];
2466 root_owner = btrfs_header_owner(parent);
2468 next = btrfs_find_create_tree_block(fs_info, bytenr);
2470 return PTR_ERR(next);
2473 ret = wc->process_func(root, next, wc, ptr_gen);
2475 free_extent_buffer(next);
2479 path->slots[*level]++;
2481 ret = btrfs_read_buffer(next, ptr_gen);
2483 free_extent_buffer(next);
2488 btrfs_tree_lock(next);
2489 btrfs_set_lock_blocking(next);
2490 clean_tree_block(fs_info, next);
2491 btrfs_wait_tree_block_writeback(next);
2492 btrfs_tree_unlock(next);
2495 WARN_ON(root_owner !=
2496 BTRFS_TREE_LOG_OBJECTID);
2497 ret = btrfs_free_and_pin_reserved_extent(
2501 free_extent_buffer(next);
2505 free_extent_buffer(next);
2508 ret = btrfs_read_buffer(next, ptr_gen);
2510 free_extent_buffer(next);
2514 WARN_ON(*level <= 0);
2515 if (path->nodes[*level-1])
2516 free_extent_buffer(path->nodes[*level-1]);
2517 path->nodes[*level-1] = next;
2518 *level = btrfs_header_level(next);
2519 path->slots[*level] = 0;
2522 WARN_ON(*level < 0);
2523 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2525 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2531 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2532 struct btrfs_root *root,
2533 struct btrfs_path *path, int *level,
2534 struct walk_control *wc)
2536 struct btrfs_fs_info *fs_info = root->fs_info;
2542 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2543 slot = path->slots[i];
2544 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2547 WARN_ON(*level == 0);
2550 struct extent_buffer *parent;
2551 if (path->nodes[*level] == root->node)
2552 parent = path->nodes[*level];
2554 parent = path->nodes[*level + 1];
2556 root_owner = btrfs_header_owner(parent);
2557 ret = wc->process_func(root, path->nodes[*level], wc,
2558 btrfs_header_generation(path->nodes[*level]));
2563 struct extent_buffer *next;
2565 next = path->nodes[*level];
2568 btrfs_tree_lock(next);
2569 btrfs_set_lock_blocking(next);
2570 clean_tree_block(fs_info, next);
2571 btrfs_wait_tree_block_writeback(next);
2572 btrfs_tree_unlock(next);
2575 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2576 ret = btrfs_free_and_pin_reserved_extent(
2578 path->nodes[*level]->start,
2579 path->nodes[*level]->len);
2583 free_extent_buffer(path->nodes[*level]);
2584 path->nodes[*level] = NULL;
2592 * drop the reference count on the tree rooted at 'snap'. This traverses
2593 * the tree freeing any blocks that have a ref count of zero after being decremented.
2596 static int walk_log_tree(struct btrfs_trans_handle *trans,
2597 struct btrfs_root *log, struct walk_control *wc)
2599 struct btrfs_fs_info *fs_info = log->fs_info;
2603 struct btrfs_path *path;
2606 path = btrfs_alloc_path();
2610 level = btrfs_header_level(log->node);
2612 path->nodes[level] = log->node;
2613 extent_buffer_get(log->node);
2614 path->slots[level] = 0;
2617 wret = walk_down_log_tree(trans, log, path, &level, wc);
2625 wret = walk_up_log_tree(trans, log, path, &level, wc);
2634 /* was the root node processed? if not, catch it here */
2635 if (path->nodes[orig_level]) {
2636 ret = wc->process_func(log, path->nodes[orig_level], wc,
2637 btrfs_header_generation(path->nodes[orig_level]));
2641 struct extent_buffer *next;
2643 next = path->nodes[orig_level];
2646 btrfs_tree_lock(next);
2647 btrfs_set_lock_blocking(next);
2648 clean_tree_block(fs_info, next);
2649 btrfs_wait_tree_block_writeback(next);
2650 btrfs_tree_unlock(next);
2653 WARN_ON(log->root_key.objectid !=
2654 BTRFS_TREE_LOG_OBJECTID);
2655 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2656 next->start, next->len);
2663 btrfs_free_path(path);
2668 * helper function to update the item for a given subvolume's log root
2669 * in the tree of log roots
2671 static int update_log_root(struct btrfs_trans_handle *trans,
2672 struct btrfs_root *log)
2674 struct btrfs_fs_info *fs_info = log->fs_info;
2677 if (log->log_transid == 1) {
2678 /* insert root item on the first sync */
2679 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2680 &log->root_key, &log->root_item);
2682 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2683 &log->root_key, &log->root_item);
2688 static void wait_log_commit(struct btrfs_root *root, int transid)
2691 int index = transid % 2;
2694 * we only allow two pending log transactions at a time,
2695 * so we know that if ours is more than 2 older than the
2696 * current transaction, we're done
2699 prepare_to_wait(&root->log_commit_wait[index],
2700 &wait, TASK_UNINTERRUPTIBLE);
2701 mutex_unlock(&root->log_mutex);
2703 if (root->log_transid_committed < transid &&
2704 atomic_read(&root->log_commit[index]))
2707 finish_wait(&root->log_commit_wait[index], &wait);
2708 mutex_lock(&root->log_mutex);
2709 } while (root->log_transid_committed < transid &&
2710 atomic_read(&root->log_commit[index]));
2713 static void wait_for_writer(struct btrfs_root *root)
2717 while (atomic_read(&root->log_writers)) {
2718 prepare_to_wait(&root->log_writer_wait,
2719 &wait, TASK_UNINTERRUPTIBLE);
2720 mutex_unlock(&root->log_mutex);
2721 if (atomic_read(&root->log_writers))
2723 finish_wait(&root->log_writer_wait, &wait);
2724 mutex_lock(&root->log_mutex);
2728 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2729 struct btrfs_log_ctx *ctx)
2734 mutex_lock(&root->log_mutex);
2735 list_del_init(&ctx->list);
2736 mutex_unlock(&root->log_mutex);
2740 * Invoked in log mutex context, or when it is certain that no other
2741 * task can access the list.
2743 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2744 int index, int error)
2746 struct btrfs_log_ctx *ctx;
2747 struct btrfs_log_ctx *safe;
2749 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2750 list_del_init(&ctx->list);
2751 ctx->log_ret = error;
2754 INIT_LIST_HEAD(&root->log_ctxs[index]);
2758 * btrfs_sync_log sends a given tree log down to the disk and
2759 * updates the super blocks to record it. When this call is done,
2760 * you know that any inodes previously logged are safely on disk only if it returns 0.
2763 * Any other return value means you need to call btrfs_commit_transaction.
2764 * Some of the edge cases for fsyncing directories that have had unlinks
2765 * or renames done in the past mean that sometimes the only safe
2766 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2767 * that has happened.
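/*
 * Illustrative sketch, not part of this file's logic: roughly how a caller
 * in the fsync path is expected to act on the return value described above
 * (0 means the log reached disk, anything else means only a full commit is
 * safe). The surrounding transaction handling is simplified and assumed,
 * not copied from btrfs_sync_file():
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);
 *	else
 *		ret = btrfs_commit_transaction(trans);
 */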
2769 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2770 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2776 struct btrfs_fs_info *fs_info = root->fs_info;
2777 struct btrfs_root *log = root->log_root;
2778 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
2779 int log_transid = 0;
2780 struct btrfs_log_ctx root_log_ctx;
2781 struct blk_plug plug;
2783 mutex_lock(&root->log_mutex);
2784 log_transid = ctx->log_transid;
2785 if (root->log_transid_committed >= log_transid) {
2786 mutex_unlock(&root->log_mutex);
2787 return ctx->log_ret;
2790 index1 = log_transid % 2;
2791 if (atomic_read(&root->log_commit[index1])) {
2792 wait_log_commit(root, log_transid);
2793 mutex_unlock(&root->log_mutex);
2794 return ctx->log_ret;
2796 ASSERT(log_transid == root->log_transid);
2797 atomic_set(&root->log_commit[index1], 1);
2799 /* wait for previous tree log sync to complete */
2800 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2801 wait_log_commit(root, log_transid - 1);
2804 int batch = atomic_read(&root->log_batch);
2805 /* when we're on an ssd, just kick the log commit out */
2806 if (!btrfs_test_opt(fs_info, SSD) &&
2807 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2808 mutex_unlock(&root->log_mutex);
2809 schedule_timeout_uninterruptible(1);
2810 mutex_lock(&root->log_mutex);
2812 wait_for_writer(root);
2813 if (batch == atomic_read(&root->log_batch))
2817 /* bail out if we need to do a full commit */
2818 if (btrfs_need_log_full_commit(fs_info, trans)) {
2820 btrfs_free_logged_extents(log, log_transid);
2821 mutex_unlock(&root->log_mutex);
2825 if (log_transid % 2 == 0)
2826 mark = EXTENT_DIRTY;
2830 /* we start IO on all the marked extents here, but we don't actually
2831 * wait for them until later.
2833 blk_start_plug(&plug);
2834 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
2836 blk_finish_plug(&plug);
2837 btrfs_abort_transaction(trans, ret);
2838 btrfs_free_logged_extents(log, log_transid);
2839 btrfs_set_log_full_commit(fs_info, trans);
2840 mutex_unlock(&root->log_mutex);
2844 btrfs_set_root_node(&log->root_item, log->node);
2846 root->log_transid++;
2847 log->log_transid = root->log_transid;
2848 root->log_start_pid = 0;
2850 * IO has been started, blocks of the log tree have WRITTEN flag set
2851 * in their headers. New modifications of the log will be written to
2852 * new positions. So it's safe to allow log writers to go in.
2854 mutex_unlock(&root->log_mutex);
2856 btrfs_init_log_ctx(&root_log_ctx, NULL);
2858 mutex_lock(&log_root_tree->log_mutex);
2859 atomic_inc(&log_root_tree->log_batch);
2860 atomic_inc(&log_root_tree->log_writers);
2862 index2 = log_root_tree->log_transid % 2;
2863 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2864 root_log_ctx.log_transid = log_root_tree->log_transid;
2866 mutex_unlock(&log_root_tree->log_mutex);
2868 ret = update_log_root(trans, log);
2870 mutex_lock(&log_root_tree->log_mutex);
2871 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2873 * Implicit memory barrier after atomic_dec_and_test
2875 if (waitqueue_active(&log_root_tree->log_writer_wait))
2876 wake_up(&log_root_tree->log_writer_wait);
2880 if (!list_empty(&root_log_ctx.list))
2881 list_del_init(&root_log_ctx.list);
2883 blk_finish_plug(&plug);
2884 btrfs_set_log_full_commit(fs_info, trans);
2886 if (ret != -ENOSPC) {
2887 btrfs_abort_transaction(trans, ret);
2888 mutex_unlock(&log_root_tree->log_mutex);
2891 btrfs_wait_tree_log_extents(log, mark);
2892 btrfs_free_logged_extents(log, log_transid);
2893 mutex_unlock(&log_root_tree->log_mutex);
2898 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2899 blk_finish_plug(&plug);
2900 list_del_init(&root_log_ctx.list);
2901 mutex_unlock(&log_root_tree->log_mutex);
2902 ret = root_log_ctx.log_ret;
2906 index2 = root_log_ctx.log_transid % 2;
2907 if (atomic_read(&log_root_tree->log_commit[index2])) {
2908 blk_finish_plug(&plug);
2909 ret = btrfs_wait_tree_log_extents(log, mark);
2910 btrfs_wait_logged_extents(trans, log, log_transid);
2911 wait_log_commit(log_root_tree,
2912 root_log_ctx.log_transid);
2913 mutex_unlock(&log_root_tree->log_mutex);
2915 ret = root_log_ctx.log_ret;
2918 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2919 atomic_set(&log_root_tree->log_commit[index2], 1);
2921 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2922 wait_log_commit(log_root_tree,
2923 root_log_ctx.log_transid - 1);
2926 wait_for_writer(log_root_tree);
2929 * now that we've moved on to the tree of log tree roots,
2930 * check the full commit flag again
2932 if (btrfs_need_log_full_commit(fs_info, trans)) {
2933 blk_finish_plug(&plug);
2934 btrfs_wait_tree_log_extents(log, mark);
2935 btrfs_free_logged_extents(log, log_transid);
2936 mutex_unlock(&log_root_tree->log_mutex);
2938 goto out_wake_log_root;
2941 ret = btrfs_write_marked_extents(fs_info,
2942 &log_root_tree->dirty_log_pages,
2943 EXTENT_DIRTY | EXTENT_NEW);
2944 blk_finish_plug(&plug);
2946 btrfs_set_log_full_commit(fs_info, trans);
2947 btrfs_abort_transaction(trans, ret);
2948 btrfs_free_logged_extents(log, log_transid);
2949 mutex_unlock(&log_root_tree->log_mutex);
2950 goto out_wake_log_root;
2952 ret = btrfs_wait_tree_log_extents(log, mark);
2954 ret = btrfs_wait_tree_log_extents(log_root_tree,
2955 EXTENT_NEW | EXTENT_DIRTY);
2957 btrfs_set_log_full_commit(fs_info, trans);
2958 btrfs_free_logged_extents(log, log_transid);
2959 mutex_unlock(&log_root_tree->log_mutex);
2960 goto out_wake_log_root;
2962 btrfs_wait_logged_extents(trans, log, log_transid);
2964 btrfs_set_super_log_root(fs_info->super_for_commit,
2965 log_root_tree->node->start);
2966 btrfs_set_super_log_root_level(fs_info->super_for_commit,
2967 btrfs_header_level(log_root_tree->node));
2969 log_root_tree->log_transid++;
2970 mutex_unlock(&log_root_tree->log_mutex);
2973 * nobody else is going to jump in and write the ctree
2974 * super here because the log_commit atomic below is protecting
2975 * us. We must be called with a transaction handle pinning
2976 * the running transaction open, so a full commit can't hop
2977 * in and cause problems either.
2979 ret = write_all_supers(fs_info, 1);
2981 btrfs_set_log_full_commit(fs_info, trans);
2982 btrfs_abort_transaction(trans, ret);
2983 goto out_wake_log_root;
2986 mutex_lock(&root->log_mutex);
2987 if (root->last_log_commit < log_transid)
2988 root->last_log_commit = log_transid;
2989 mutex_unlock(&root->log_mutex);
2992 mutex_lock(&log_root_tree->log_mutex);
2993 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2995 log_root_tree->log_transid_committed++;
2996 atomic_set(&log_root_tree->log_commit[index2], 0);
2997 mutex_unlock(&log_root_tree->log_mutex);
3000 * The barrier before waitqueue_active is implied by mutex_unlock
3002 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
3003 wake_up(&log_root_tree->log_commit_wait[index2]);
3005 mutex_lock(&root->log_mutex);
3006 btrfs_remove_all_log_ctxs(root, index1, ret);
3007 root->log_transid_committed++;
3008 atomic_set(&root->log_commit[index1], 0);
3009 mutex_unlock(&root->log_mutex);
3012 * The barrier before waitqueue_active is implied by mutex_unlock
3014 if (waitqueue_active(&root->log_commit_wait[index1]))
3015 wake_up(&root->log_commit_wait[index1]);
3019 static void free_log_tree(struct btrfs_trans_handle *trans,
3020 struct btrfs_root *log)
3025 struct walk_control wc = {
3027 .process_func = process_one_buffer
3030 ret = walk_log_tree(trans, log, &wc);
3031 /* I don't think this can happen but just in case */
3033 btrfs_abort_transaction(trans, ret);
3036 ret = find_first_extent_bit(&log->dirty_log_pages,
3037 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
3042 clear_extent_bits(&log->dirty_log_pages, start, end,
3043 EXTENT_DIRTY | EXTENT_NEW);
3047 * We may have short-circuited the log tree with the full commit logic
3048 * and left ordered extents on our list, so clear these out to keep us
3049 * from leaking inodes and memory.
3051 btrfs_free_logged_extents(log, 0);
3052 btrfs_free_logged_extents(log, 1);
3054 free_extent_buffer(log->node);
3059 * free all the extents used by the tree log. This should be called
3060 * at commit time of the full transaction
3062 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3064 if (root->log_root) {
3065 free_log_tree(trans, root->log_root);
3066 root->log_root = NULL;
3071 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3072 struct btrfs_fs_info *fs_info)
3074 if (fs_info->log_root_tree) {
3075 free_log_tree(trans, fs_info->log_root_tree);
3076 fs_info->log_root_tree = NULL;
3082 * If both a file and directory are logged, and unlinks or renames are
3083 * mixed in, we have a few interesting corners:
3085 * create file X in dir Y
3086 * link file X to X.link in dir Y
3088 * unlink file X but leave X.link
3091 * After a crash we would expect only X.link to exist. But file X
3092 * didn't get fsync'd again so the log has back refs for X and X.link.
3094 * We solve this by removing directory entries and inode backrefs from the
3095 * log when a file that was logged in the current transaction is
3096 * unlinked. Any later fsync will include the updated log entries, and
3097 * we'll be able to reconstruct the proper directory items from backrefs.
3099 * This optimization allows us to avoid relogging the entire inode
3100 * or the entire directory.
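/*
 * Illustrative userspace sketch of the sequence above, not kernel code.
 * The mount point and names are hypothetical and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/Y/X", O_CREAT | O_WRONLY, 0644);
 *
 *		write(fd, "data", 4);
 *		link("/mnt/Y/X", "/mnt/Y/X.link");
 *		fsync(fd);
 *		unlink("/mnt/Y/X");
 *		fsync(fd);
 *		close(fd);
 *		return 0;
 *	}
 *
 * After a crash following the last fsync, only X.link should remain.
 */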
3102 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3103 struct btrfs_root *root,
3104 const char *name, int name_len,
3105 struct btrfs_inode *dir, u64 index)
3107 struct btrfs_root *log;
3108 struct btrfs_dir_item *di;
3109 struct btrfs_path *path;
3113 u64 dir_ino = btrfs_ino(dir);
3115 if (dir->logged_trans < trans->transid)
3118 ret = join_running_log_trans(root);
3122 mutex_lock(&dir->log_mutex);
3124 log = root->log_root;
3125 path = btrfs_alloc_path();
3131 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3132 name, name_len, -1);
3138 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3139 bytes_del += name_len;
3145 btrfs_release_path(path);
3146 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3147 index, name, name_len, -1);
3153 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3154 bytes_del += name_len;
3161 /* update the directory size in the log to reflect the names we have removed
3165 struct btrfs_key key;
3167 key.objectid = dir_ino;
3169 key.type = BTRFS_INODE_ITEM_KEY;
3170 btrfs_release_path(path);
3172 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3178 struct btrfs_inode_item *item;
3181 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3182 struct btrfs_inode_item);
3183 i_size = btrfs_inode_size(path->nodes[0], item);
3184 if (i_size > bytes_del)
3185 i_size -= bytes_del;
3188 btrfs_set_inode_size(path->nodes[0], item, i_size);
3189 btrfs_mark_buffer_dirty(path->nodes[0]);
3192 btrfs_release_path(path);
3195 btrfs_free_path(path);
3197 mutex_unlock(&dir->log_mutex);
3198 if (ret == -ENOSPC) {
3199 btrfs_set_log_full_commit(root->fs_info, trans);
3202 btrfs_abort_transaction(trans, ret);
3204 btrfs_end_log_trans(root);
3209 /* see comments for btrfs_del_dir_entries_in_log */
3210 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3211 struct btrfs_root *root,
3212 const char *name, int name_len,
3213 struct btrfs_inode *inode, u64 dirid)
3215 struct btrfs_fs_info *fs_info = root->fs_info;
3216 struct btrfs_root *log;
3220 if (inode->logged_trans < trans->transid)
3223 ret = join_running_log_trans(root);
3226 log = root->log_root;
3227 mutex_lock(&inode->log_mutex);
3229 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3231 mutex_unlock(&inode->log_mutex);
3232 if (ret == -ENOSPC) {
3233 btrfs_set_log_full_commit(fs_info, trans);
3235 } else if (ret < 0 && ret != -ENOENT)
3236 btrfs_abort_transaction(trans, ret);
3237 btrfs_end_log_trans(root);
3243 * creates a range item in the log for 'dirid'. first_offset and
3244 * last_offset tell us which parts of the key space the log should
3245 * be considered authoritative for.
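/*
 * Illustrative sketch of the replay-side meaning of this range item. The
 * helper below is hypothetical and not used anywhere in this file; it only
 * spells out the rule: an entry found in the subvolume is deleted on replay
 * only if its offset falls inside a logged range and the entry itself is
 * missing from the log.
 *
 *	static inline bool replay_should_delete_entry(u64 offset,
 *						      u64 first_offset,
 *						      u64 last_offset,
 *						      bool found_in_log)
 *	{
 *		return offset >= first_offset && offset <= last_offset &&
 *		       !found_in_log;
 *	}
 */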
3247 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3248 struct btrfs_root *log,
3249 struct btrfs_path *path,
3250 int key_type, u64 dirid,
3251 u64 first_offset, u64 last_offset)
3254 struct btrfs_key key;
3255 struct btrfs_dir_log_item *item;
3257 key.objectid = dirid;
3258 key.offset = first_offset;
3259 if (key_type == BTRFS_DIR_ITEM_KEY)
3260 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3262 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3263 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3267 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3268 struct btrfs_dir_log_item);
3269 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3270 btrfs_mark_buffer_dirty(path->nodes[0]);
3271 btrfs_release_path(path);
3276 * log all the items included in the current transaction for a given
3277 * directory. This also creates the range items in the log tree required
3278 * to replay anything deleted before the fsync
3280 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3281 struct btrfs_root *root, struct btrfs_inode *inode,
3282 struct btrfs_path *path,
3283 struct btrfs_path *dst_path, int key_type,
3284 struct btrfs_log_ctx *ctx,
3285 u64 min_offset, u64 *last_offset_ret)
3287 struct btrfs_key min_key;
3288 struct btrfs_root *log = root->log_root;
3289 struct extent_buffer *src;
3294 u64 first_offset = min_offset;
3295 u64 last_offset = (u64)-1;
3296 u64 ino = btrfs_ino(inode);
3298 log = root->log_root;
3300 min_key.objectid = ino;
3301 min_key.type = key_type;
3302 min_key.offset = min_offset;
3304 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3307 * we didn't find anything from this transaction, see if there
3308 * is anything at all
3310 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3311 min_key.objectid = ino;
3312 min_key.type = key_type;
3313 min_key.offset = (u64)-1;
3314 btrfs_release_path(path);
3315 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3317 btrfs_release_path(path);
3320 ret = btrfs_previous_item(root, path, ino, key_type);
3322 /* if ret == 0 there are items for this type,
3323 * create a range to tell us the last key of this type.
3324 * otherwise, there are no items in this directory after
3325 * *min_offset, and we create a range to indicate that.
3328 struct btrfs_key tmp;
3329 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3331 if (key_type == tmp.type)
3332 first_offset = max(min_offset, tmp.offset) + 1;
3337 /* go backward to find any previous key */
3338 ret = btrfs_previous_item(root, path, ino, key_type);
3340 struct btrfs_key tmp;
3341 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3342 if (key_type == tmp.type) {
3343 first_offset = tmp.offset;
3344 ret = overwrite_item(trans, log, dst_path,
3345 path->nodes[0], path->slots[0],
3353 btrfs_release_path(path);
3355 /* find the first key from this transaction again */
3356 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3357 if (WARN_ON(ret != 0))
3361 * we have a block from this transaction, log every item in it
3362 * from our directory
3365 struct btrfs_key tmp;
3366 src = path->nodes[0];
3367 nritems = btrfs_header_nritems(src);
3368 for (i = path->slots[0]; i < nritems; i++) {
3369 struct btrfs_dir_item *di;
3371 btrfs_item_key_to_cpu(src, &min_key, i);
3373 if (min_key.objectid != ino || min_key.type != key_type)
3375 ret = overwrite_item(trans, log, dst_path, src, i,
3383 * We must make sure that when we log a directory entry,
3384 * the corresponding inode, after log replay, has a
3385 * matching link count. For example:
3391 * xfs_io -c "fsync" mydir
3393 * <mount fs and log replay>
3395 * Would result in an fsync log that when replayed, our
3396 * file inode would have a link count of 1, but we get
3397 * two directory entries pointing to the same inode.
3398 * After removing one of the names, it would not be
3399 * possible to remove the other name, which always
3400 * resulted in stale file handle errors, and would not
3401 * be possible to rmdir the parent directory, since
3402 * its i_size could never decrement to the value
3403 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3405 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3406 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3408 (btrfs_dir_transid(src, di) == trans->transid ||
3409 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3410 tmp.type != BTRFS_ROOT_ITEM_KEY)
3411 ctx->log_new_dentries = true;
3413 path->slots[0] = nritems;
3416 * look ahead to the next item and see if it is also
3417 * from this directory and from this transaction
3419 ret = btrfs_next_leaf(root, path);
3421 last_offset = (u64)-1;
3424 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3425 if (tmp.objectid != ino || tmp.type != key_type) {
3426 last_offset = (u64)-1;
3429 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3430 ret = overwrite_item(trans, log, dst_path,
3431 path->nodes[0], path->slots[0],
3436 last_offset = tmp.offset;
3441 btrfs_release_path(path);
3442 btrfs_release_path(dst_path);
3445 *last_offset_ret = last_offset;
3447 * insert the log range keys to indicate where the log is valid
3450 ret = insert_dir_log_key(trans, log, path, key_type,
3451 ino, first_offset, last_offset);
3459 * logging directories is very similar to logging inodes. We find all the items
3460 * from the current transaction and write them to the log.
3462 * The recovery code scans the directory in the subvolume, and if it finds a
3463 * key in the range logged that is not present in the log tree, then it means
3464 * that dir entry was unlinked during the transaction.
3466 * In order for that scan to work, we must include one key smaller than
3467 * the smallest key logged by this transaction and one key larger than the largest
3468 * key logged by this transaction.
3470 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3471 struct btrfs_root *root, struct btrfs_inode *inode,
3472 struct btrfs_path *path,
3473 struct btrfs_path *dst_path,
3474 struct btrfs_log_ctx *ctx)
3479 int key_type = BTRFS_DIR_ITEM_KEY;
3485 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3486 ctx, min_key, &max_key);
3489 if (max_key == (u64)-1)
3491 min_key = max_key + 1;
3494 if (key_type == BTRFS_DIR_ITEM_KEY) {
3495 key_type = BTRFS_DIR_INDEX_KEY;
3502 * a helper function to drop items from the log before we relog an
3503 * inode. max_key_type indicates the highest item type to remove.
3504 * This cannot be run for file data extents because it does not
3505 * free the extents they point to.
3507 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3508 struct btrfs_root *log,
3509 struct btrfs_path *path,
3510 u64 objectid, int max_key_type)
3513 struct btrfs_key key;
3514 struct btrfs_key found_key;
3517 key.objectid = objectid;
3518 key.type = max_key_type;
3519 key.offset = (u64)-1;
3522 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3523 BUG_ON(ret == 0); /* Logic error */
3527 if (path->slots[0] == 0)
3531 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3534 if (found_key.objectid != objectid)
3537 found_key.offset = 0;
3539 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3542 ret = btrfs_del_items(trans, log, path, start_slot,
3543 path->slots[0] - start_slot + 1);
3545 * If start slot isn't 0 then we don't need to re-search; we've
3546 * found the last guy with the objectid in this tree.
3548 if (ret || start_slot != 0)
3550 btrfs_release_path(path);
3552 btrfs_release_path(path);
3558 static void fill_inode_item(struct btrfs_trans_handle *trans,
3559 struct extent_buffer *leaf,
3560 struct btrfs_inode_item *item,
3561 struct inode *inode, int log_inode_only,
3564 struct btrfs_map_token token;
3566 btrfs_init_map_token(&token);
3568 if (log_inode_only) {
3569 /* set the generation to zero so the recovery code
3570 * can tell the difference between logging
3571 * just to say 'this inode exists' and logging
3572 * to say 'update this inode with these values'
3574 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3575 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3577 btrfs_set_token_inode_generation(leaf, item,
3578 BTRFS_I(inode)->generation,
3580 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3583 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3584 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3585 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3586 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3588 btrfs_set_token_timespec_sec(leaf, &item->atime,
3589 inode->i_atime.tv_sec, &token);
3590 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3591 inode->i_atime.tv_nsec, &token);
3593 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3594 inode->i_mtime.tv_sec, &token);
3595 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3596 inode->i_mtime.tv_nsec, &token);
3598 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3599 inode->i_ctime.tv_sec, &token);
3600 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3601 inode->i_ctime.tv_nsec, &token);
3603 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3606 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3607 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3608 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3609 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3610 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3613 static int log_inode_item(struct btrfs_trans_handle *trans,
3614 struct btrfs_root *log, struct btrfs_path *path,
3615 struct btrfs_inode *inode)
3617 struct btrfs_inode_item *inode_item;
3620 ret = btrfs_insert_empty_item(trans, log, path,
3621 &inode->location, sizeof(*inode_item));
3622 if (ret && ret != -EEXIST)
3624 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3625 struct btrfs_inode_item);
3626 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3628 btrfs_release_path(path);
3632 static noinline int copy_items(struct btrfs_trans_handle *trans,
3633 struct btrfs_inode *inode,
3634 struct btrfs_path *dst_path,
3635 struct btrfs_path *src_path, u64 *last_extent,
3636 int start_slot, int nr, int inode_only,
3639 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
3640 unsigned long src_offset;
3641 unsigned long dst_offset;
3642 struct btrfs_root *log = inode->root->log_root;
3643 struct btrfs_file_extent_item *extent;
3644 struct btrfs_inode_item *inode_item;
3645 struct extent_buffer *src = src_path->nodes[0];
3646 struct btrfs_key first_key, last_key, key;
3648 struct btrfs_key *ins_keys;
3652 struct list_head ordered_sums;
3653 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3654 bool has_extents = false;
3655 bool need_find_last_extent = true;
3658 INIT_LIST_HEAD(&ordered_sums);
3660 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3661 nr * sizeof(u32), GFP_NOFS);
3665 first_key.objectid = (u64)-1;
3667 ins_sizes = (u32 *)ins_data;
3668 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3670 for (i = 0; i < nr; i++) {
3671 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3672 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3674 ret = btrfs_insert_empty_items(trans, log, dst_path,
3675 ins_keys, ins_sizes, nr);
3681 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3682 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3683 dst_path->slots[0]);
3685 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3687 if ((i == (nr - 1)))
3688 last_key = ins_keys[i];
3690 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3691 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3693 struct btrfs_inode_item);
3694 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3696 inode_only == LOG_INODE_EXISTS,
3699 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3700 src_offset, ins_sizes[i]);
3704 * We set need_find_last_extent here in case we know we were
3705 * processing other items and then walk into the first extent in
3706 * the inode. If we don't hit an extent then nothing changes,
3707 * we'll do the last search the next time around.
3709 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3711 if (first_key.objectid == (u64)-1)
3712 first_key = ins_keys[i];
3714 need_find_last_extent = false;
3717 /* take a reference on file data extents so that truncates
3718 * or deletes of this inode don't have to relog the inode
3721 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3724 extent = btrfs_item_ptr(src, start_slot + i,
3725 struct btrfs_file_extent_item);
3727 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3730 found_type = btrfs_file_extent_type(src, extent);
3731 if (found_type == BTRFS_FILE_EXTENT_REG) {
3733 ds = btrfs_file_extent_disk_bytenr(src,
3735 /* ds == 0 is a hole */
3739 dl = btrfs_file_extent_disk_num_bytes(src,
3741 cs = btrfs_file_extent_offset(src, extent);
3742 cl = btrfs_file_extent_num_bytes(src,
3744 if (btrfs_file_extent_compression(src,
3750 ret = btrfs_lookup_csums_range(
3752 ds + cs, ds + cs + cl - 1,
3755 btrfs_release_path(dst_path);
3763 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3764 btrfs_release_path(dst_path);
3768 * we have to do this after the loop above to avoid changing the
3769 * log tree while trying to change the log tree.
3772 while (!list_empty(&ordered_sums)) {
3773 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3774 struct btrfs_ordered_sum,
3777 ret = btrfs_csum_file_blocks(trans, log, sums);
3778 list_del(&sums->list);
3785 if (need_find_last_extent && *last_extent == first_key.offset) {
3787 * We don't have any leaves between our current one and the one
3788 * we processed before that can have file extent items for our
3789 * inode (and have a generation number smaller than our current
3792 need_find_last_extent = false;
3796 * Because we use btrfs_search_forward we could skip leaves that were
3797 * not modified and then assume *last_extent is valid when it really
3798 * isn't. So back up to the previous leaf and read the end of the last
3799 * extent before we go and fill in holes.
3801 if (need_find_last_extent) {
3804 ret = btrfs_prev_leaf(inode->root, src_path);
3809 if (src_path->slots[0])
3810 src_path->slots[0]--;
3811 src = src_path->nodes[0];
3812 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3813 if (key.objectid != btrfs_ino(inode) ||
3814 key.type != BTRFS_EXTENT_DATA_KEY)
3816 extent = btrfs_item_ptr(src, src_path->slots[0],
3817 struct btrfs_file_extent_item);
3818 if (btrfs_file_extent_type(src, extent) ==
3819 BTRFS_FILE_EXTENT_INLINE) {
3820 len = btrfs_file_extent_inline_len(src,
3823 *last_extent = ALIGN(key.offset + len,
3824 fs_info->sectorsize);
3826 len = btrfs_file_extent_num_bytes(src, extent);
3827 *last_extent = key.offset + len;
3831 /* So we did prev_leaf; now we need to move to the next leaf, but a few
3832 * things could have happened
3834 * 1) A merge could have happened, so we could currently be on a leaf
3835 * that holds what we were copying in the first place.
3836 * 2) A split could have happened, and now not all of the items we want
3837 * are on the same leaf.
3839 * So we need to adjust how we search for holes: we need to drop the
3840 * path and re-search for the first extent key we found, and then walk
3841 * forward until we hit the last one we copied.
3843 if (need_find_last_extent) {
3844 /* btrfs_prev_leaf could return 1 without releasing the path */
3845 btrfs_release_path(src_path);
3846 ret = btrfs_search_slot(NULL, inode->root, &first_key,
3851 src = src_path->nodes[0];
3852 i = src_path->slots[0];
3858 * Ok so here we need to go through and fill in any holes we may have
3859 * to make sure that holes are punched for those areas in case they had
3860 * extents previously.
3866 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3867 ret = btrfs_next_leaf(inode->root, src_path);
3871 src = src_path->nodes[0];
3875 btrfs_item_key_to_cpu(src, &key, i);
3876 if (!btrfs_comp_cpu_keys(&key, &last_key))
3878 if (key.objectid != btrfs_ino(inode) ||
3879 key.type != BTRFS_EXTENT_DATA_KEY) {
3883 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3884 if (btrfs_file_extent_type(src, extent) ==
3885 BTRFS_FILE_EXTENT_INLINE) {
3886 len = btrfs_file_extent_inline_len(src, i, extent);
3887 extent_end = ALIGN(key.offset + len,
3888 fs_info->sectorsize);
3890 len = btrfs_file_extent_num_bytes(src, extent);
3891 extent_end = key.offset + len;
3895 if (*last_extent == key.offset) {
3896 *last_extent = extent_end;
3899 offset = *last_extent;
3900 len = key.offset - *last_extent;
3901 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3902 offset, 0, 0, len, 0, len, 0, 0, 0);
3905 *last_extent = extent_end;
3908 * Need to let the callers know we dropped the path so they should re-search.
3911 if (!ret && need_find_last_extent)
3916 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3918 struct extent_map *em1, *em2;
3920 em1 = list_entry(a, struct extent_map, list);
3921 em2 = list_entry(b, struct extent_map, list);
3923 if (em1->start < em2->start)
3925 else if (em1->start > em2->start)
3930 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3931 struct inode *inode,
3932 struct btrfs_root *root,
3933 const struct extent_map *em,
3934 const struct list_head *logged_list,
3935 bool *ordered_io_error)
3937 struct btrfs_fs_info *fs_info = root->fs_info;
3938 struct btrfs_ordered_extent *ordered;
3939 struct btrfs_root *log = root->log_root;
3940 u64 mod_start = em->mod_start;
3941 u64 mod_len = em->mod_len;
3942 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3945 LIST_HEAD(ordered_sums);
3948 *ordered_io_error = false;
3950 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3951 em->block_start == EXTENT_MAP_HOLE)
3955 * Wait for any ordered extent that covers our extent map. If it
3956 * finishes without an error, first check and see if our csums are on
3957 * our outstanding ordered extents.
3959 list_for_each_entry(ordered, logged_list, log_list) {
3960 struct btrfs_ordered_sum *sum;
3965 if (ordered->file_offset + ordered->len <= mod_start ||
3966 mod_start + mod_len <= ordered->file_offset)
3969 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3970 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3971 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3972 const u64 start = ordered->file_offset;
3973 const u64 end = ordered->file_offset + ordered->len - 1;
3975 WARN_ON(ordered->inode != inode);
3976 filemap_fdatawrite_range(inode->i_mapping, start, end);
3979 wait_event(ordered->wait,
3980 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3981 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3983 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3985 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3986 * i_mapping flags, so that the next fsync won't get
3987 * an outdated io error too.
3989 filemap_check_errors(inode->i_mapping);
3990 *ordered_io_error = true;
3994 * We are going to copy all the csums on this ordered extent, so
3995 * go ahead and adjust mod_start and mod_len in case this
3996 * ordered extent has already been logged.
3998 if (ordered->file_offset > mod_start) {
3999 if (ordered->file_offset + ordered->len >=
4000 mod_start + mod_len)
4001 mod_len = ordered->file_offset - mod_start;
4003 * If we have this case
4005 * |--------- logged extent ---------|
4006 * |----- ordered extent ----|
4008 * Just don't mess with mod_start and mod_len, we'll
4009 * just end up logging more csums than we need and it will be ok.
4013 if (ordered->file_offset + ordered->len <
4014 mod_start + mod_len) {
4015 mod_len = (mod_start + mod_len) -
4016 (ordered->file_offset + ordered->len);
4017 mod_start = ordered->file_offset +
4028 * To keep us from looping for the above case of an ordered
4029 * extent that falls inside of the logged extent.
4031 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
4035 list_for_each_entry(sum, &ordered->list, list) {
4036 ret = btrfs_csum_file_blocks(trans, log, sum);
4042 if (*ordered_io_error || !mod_len || ret || skip_csum)
4045 if (em->compress_type) {
4047 csum_len = max(em->block_len, em->orig_block_len);
4049 csum_offset = mod_start - em->start;
4053 /* block start is already adjusted for the file extent offset. */
4054 ret = btrfs_lookup_csums_range(fs_info->csum_root,
4055 em->block_start + csum_offset,
4056 em->block_start + csum_offset +
4057 csum_len - 1, &ordered_sums, 0);
4061 while (!list_empty(&ordered_sums)) {
4062 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4063 struct btrfs_ordered_sum,
4066 ret = btrfs_csum_file_blocks(trans, log, sums);
4067 list_del(&sums->list);
4074 static int log_one_extent(struct btrfs_trans_handle *trans,
4075 struct btrfs_inode *inode, struct btrfs_root *root,
4076 const struct extent_map *em,
4077 struct btrfs_path *path,
4078 const struct list_head *logged_list,
4079 struct btrfs_log_ctx *ctx)
4081 struct btrfs_root *log = root->log_root;
4082 struct btrfs_file_extent_item *fi;
4083 struct extent_buffer *leaf;
4084 struct btrfs_map_token token;
4085 struct btrfs_key key;
4086 u64 extent_offset = em->start - em->orig_start;
4089 int extent_inserted = 0;
4090 bool ordered_io_err = false;
4092 ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
4093 logged_list, &ordered_io_err);
4097 if (ordered_io_err) {
4102 btrfs_init_map_token(&token);
4104 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4105 em->start + em->len, NULL, 0, 1,
4106 sizeof(*fi), &extent_inserted);
4110 if (!extent_inserted) {
4111 key.objectid = btrfs_ino(inode);
4112 key.type = BTRFS_EXTENT_DATA_KEY;
4113 key.offset = em->start;
4115 ret = btrfs_insert_empty_item(trans, log, path, &key,
4120 leaf = path->nodes[0];
4121 fi = btrfs_item_ptr(leaf, path->slots[0],
4122 struct btrfs_file_extent_item);
4124 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4126 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4127 btrfs_set_token_file_extent_type(leaf, fi,
4128 BTRFS_FILE_EXTENT_PREALLOC,
4131 btrfs_set_token_file_extent_type(leaf, fi,
4132 BTRFS_FILE_EXTENT_REG,
4135 block_len = max(em->block_len, em->orig_block_len);
4136 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4137 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4140 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4142 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4143 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4145 extent_offset, &token);
4146 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4149 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4150 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4154 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4155 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4156 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4157 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4159 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4160 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4161 btrfs_mark_buffer_dirty(leaf);
4163 btrfs_release_path(path);
4168 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4169 struct btrfs_root *root,
4170 struct btrfs_inode *inode,
4171 struct btrfs_path *path,
4172 struct list_head *logged_list,
4173 struct btrfs_log_ctx *ctx,
4177 struct extent_map *em, *n;
4178 struct list_head extents;
4179 struct extent_map_tree *tree = &inode->extent_tree;
4184 INIT_LIST_HEAD(&extents);
4186 down_write(&inode->dio_sem);
4187 write_lock(&tree->lock);
4188 test_gen = root->fs_info->last_trans_committed;
4190 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4191 list_del_init(&em->list);
4194 * Just an arbitrary number; this can be really CPU intensive
4195 * once we start getting a lot of extents, and really once we
4196 * have a bunch of extents we just want to commit since it will be faster.
4199 if (++num > 32768) {
4200 list_del_init(&tree->modified_extents);
4205 if (em->generation <= test_gen)
4207 /* Need a ref to keep it from getting evicted from cache */
4208 refcount_inc(&em->refs);
4209 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4210 list_add_tail(&em->list, &extents);
4214 list_sort(NULL, &extents, extent_cmp);
4215 btrfs_get_logged_extents(inode, logged_list, start, end);
4217 * Some ordered extents started by fsync might have completed
4218 * before we could collect them into the list logged_list, which
4219 * means they're gone, not in our logged_list nor in the inode's
4220 * ordered tree. We want the application/user space to know an
4221 * error happened while attempting to persist file data so that
4222 * it can take proper action. If such error happened, we leave
4223 * without writing to the log tree and the fsync must report the
4224 * file data write error and not commit the current transaction.
4226 ret = filemap_check_errors(inode->vfs_inode.i_mapping);
4230 while (!list_empty(&extents)) {
4231 em = list_entry(extents.next, struct extent_map, list);
4233 list_del_init(&em->list);
4236 * If we had an error we just need to delete everybody from our private list.
4240 clear_em_logging(tree, em);
4241 free_extent_map(em);
4245 write_unlock(&tree->lock);
4247 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4249 write_lock(&tree->lock);
4250 clear_em_logging(tree, em);
4251 free_extent_map(em);
4253 WARN_ON(!list_empty(&extents));
4254 write_unlock(&tree->lock);
4255 up_write(&inode->dio_sem);
4257 btrfs_release_path(path);
4261 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4262 struct btrfs_path *path, u64 *size_ret)
4264 struct btrfs_key key;
4267 key.objectid = btrfs_ino(inode);
4268 key.type = BTRFS_INODE_ITEM_KEY;
4271 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4274 } else if (ret > 0) {
4277 struct btrfs_inode_item *item;
4279 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4280 struct btrfs_inode_item);
4281 *size_ret = btrfs_inode_size(path->nodes[0], item);
4284 btrfs_release_path(path);
4289 * At the moment we always log all xattrs. This is to figure out at log replay
4290 * time which xattrs must have their deletion replayed. If an xattr is missing
4291 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4292 * because if an xattr is deleted, the inode is fsynced and a power failure
4293 * happens, causing the log to be replayed the next time the fs is mounted,
4294 * we want the xattr to not exist anymore (same behaviour as other filesystems
4295 * with a journal, ext3/4, xfs, f2fs, etc).
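/*
 * Illustrative userspace sketch of the case described above, not kernel
 * code. The path and xattr name are hypothetical and error handling is
 * omitted:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/xattr.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/foo", O_CREAT | O_RDWR, 0644);
 *
 *		fsetxattr(fd, "user.test", "1", 1, 0);
 *		fsync(fd);
 *		fremovexattr(fd, "user.test");
 *		fsync(fd);
 *		close(fd);
 *		return 0;
 *	}
 *
 * If power is lost right after the second fsync, log replay must leave the
 * inode without the user.test xattr.
 */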
4297 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4298 struct btrfs_root *root,
4299 struct btrfs_inode *inode,
4300 struct btrfs_path *path,
4301 struct btrfs_path *dst_path)
4304 struct btrfs_key key;
4305 const u64 ino = btrfs_ino(inode);
4310 key.type = BTRFS_XATTR_ITEM_KEY;
4313 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4318 int slot = path->slots[0];
4319 struct extent_buffer *leaf = path->nodes[0];
4320 int nritems = btrfs_header_nritems(leaf);
4322 if (slot >= nritems) {
4324 u64 last_extent = 0;
4326 ret = copy_items(trans, inode, dst_path, path,
4327 &last_extent, start_slot,
4329 /* can't be 1, extent items aren't processed */
4335 ret = btrfs_next_leaf(root, path);
4343 btrfs_item_key_to_cpu(leaf, &key, slot);
4344 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4354 u64 last_extent = 0;
4356 ret = copy_items(trans, inode, dst_path, path,
4357 &last_extent, start_slot,
4359 /* can't be 1, extent items aren't processed */
4369 * If the no holes feature is enabled we need to make sure any hole between the
4370 * last extent and the i_size of our inode is explicitly marked in the log. This
4371 * is to make sure that doing something like:
4373 * 1) create file with 128Kb of data
4374 * 2) truncate file to 64Kb
4375 * 3) truncate file to 256Kb
4376 * 4) fsync the file
4377 * 5) <crash/power failure>
4378 * 6) mount fs and trigger log replay
4380 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4381 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4382 * file correspond to a hole. The presence of explicit holes in a log tree is
4383 * what guarantees that log replay will remove/adjust file extent items in the
4386 * Here we do not need to care about holes between extents; that is already done
4387 * by copy_items(). We also only need to do this in the full sync path, where we
4388 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4389 * lookup the list of modified extent maps and if any represents a hole, we
4390 * insert a corresponding extent representing a hole in the log tree.
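/*
 * Illustrative userspace sketch of steps 1) to 6) above, not kernel code.
 * The path is hypothetical and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static char buf[128 * 1024];
 *		int fd = open("/mnt/foo", O_CREAT | O_RDWR, 0644);
 *
 *		memset(buf, 0xaa, sizeof(buf));
 *		write(fd, buf, sizeof(buf));
 *		ftruncate(fd, 64 * 1024);
 *		ftruncate(fd, 256 * 1024);
 *		fsync(fd);
 *		close(fd);
 *		return 0;
 *	}
 *
 * A crash after the fsync must replay to a 256Kb file whose last 192Kb is a
 * hole, which is what the explicit hole logged below guarantees.
 */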
4392 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4393 struct btrfs_root *root,
4394 struct btrfs_inode *inode,
4395 struct btrfs_path *path)
4397 struct btrfs_fs_info *fs_info = root->fs_info;
4399 struct btrfs_key key;
4402 struct extent_buffer *leaf;
4403 struct btrfs_root *log = root->log_root;
4404 const u64 ino = btrfs_ino(inode);
4405 const u64 i_size = i_size_read(&inode->vfs_inode);
4407 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4411 key.type = BTRFS_EXTENT_DATA_KEY;
4412 key.offset = (u64)-1;
4414 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4419 ASSERT(path->slots[0] > 0);
4421 leaf = path->nodes[0];
4422 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4424 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4425 /* inode does not have any extents */
4429 struct btrfs_file_extent_item *extent;
4433 * If there's an extent beyond i_size, an explicit hole was
4434 * already inserted by copy_items().
4436 if (key.offset >= i_size)
4439 extent = btrfs_item_ptr(leaf, path->slots[0],
4440 struct btrfs_file_extent_item);
4442 if (btrfs_file_extent_type(leaf, extent) ==
4443 BTRFS_FILE_EXTENT_INLINE) {
4444 len = btrfs_file_extent_inline_len(leaf,
4447 ASSERT(len == i_size);
4451 len = btrfs_file_extent_num_bytes(leaf, extent);
4452 /* Last extent goes beyond i_size, no need to log a hole. */
4453 if (key.offset + len > i_size)
4455 hole_start = key.offset + len;
4456 hole_size = i_size - hole_start;
4458 btrfs_release_path(path);
4460 /* Last extent ends at i_size. */
4464 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4465 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4466 hole_size, 0, hole_size, 0, 0, 0);
4471 * When we are logging a new inode X, check if it doesn't have a reference that
4472 * matches the reference from some other inode Y created in a past transaction
4473 * and that was renamed in the current transaction. If we don't do this, then at
4474 * log replay time we can lose inode Y (and all its files if it's a directory):
4477 * echo "hello world" > /mnt/x/foobar
4480 * mkdir /mnt/x # or touch /mnt/x
4481 * xfs_io -c fsync /mnt/x
4483 * mount fs, trigger log replay
4485 * After the log replay procedure, we would lose the first directory and all its
4486 * files (file foobar).
4487 * For the case where inode Y is not a directory we simply end up losing it:
4489 * echo "123" > /mnt/foo
4491 * mv /mnt/foo /mnt/bar
4492 * echo "abc" > /mnt/foo
4493 * xfs_io -c fsync /mnt/foo
4496 * We also need this for cases where a snapshot entry is replaced by some other
4497 * entry (file or directory) otherwise we end up with an unreplayable log due to
4498 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4499 * if it were a regular entry:
4502 * btrfs subvolume snapshot /mnt /mnt/x/snap
4503 * btrfs subvolume delete /mnt/x/snap
4506 * fsync /mnt/x or fsync some new file inside it
4509 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4510 * the same transaction.
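/*
 * Illustrative userspace sketch of the non-directory case above, not kernel
 * code. Paths are hypothetical and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/foo", O_CREAT | O_WRONLY, 0644);
 *
 *		write(fd, "123", 3);
 *		close(fd);
 *		sync();
 *		rename("/mnt/foo", "/mnt/bar");
 *		fd = open("/mnt/foo", O_CREAT | O_WRONLY, 0644);
 *		write(fd, "abc", 3);
 *		fsync(fd);
 *		close(fd);
 *		return 0;
 *	}
 *
 * Without the check below, replaying the log created by that last fsync
 * could lose /mnt/bar, because the new foo carries a name that matches the
 * reference the old inode got in a previous transaction.
 */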
4512 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4514 const struct btrfs_key *key,
4515 struct btrfs_inode *inode,
4519 struct btrfs_path *search_path;
4522 u32 item_size = btrfs_item_size_nr(eb, slot);
4524 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4526 search_path = btrfs_alloc_path();
4529 search_path->search_commit_root = 1;
4530 search_path->skip_locking = 1;
4532 while (cur_offset < item_size) {
4536 unsigned long name_ptr;
4537 struct btrfs_dir_item *di;
4539 if (key->type == BTRFS_INODE_REF_KEY) {
4540 struct btrfs_inode_ref *iref;
4542 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4543 parent = key->offset;
4544 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4545 name_ptr = (unsigned long)(iref + 1);
4546 this_len = sizeof(*iref) + this_name_len;
4548 struct btrfs_inode_extref *extref;
4550 extref = (struct btrfs_inode_extref *)(ptr +
4552 parent = btrfs_inode_extref_parent(eb, extref);
4553 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4554 name_ptr = (unsigned long)&extref->name;
4555 this_len = sizeof(*extref) + this_name_len;
4558 if (this_name_len > name_len) {
4561 new_name = krealloc(name, this_name_len, GFP_NOFS);
4566 name_len = this_name_len;
4570 read_extent_buffer(eb, name, name_ptr, this_name_len);
4571 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4572 parent, name, this_name_len, 0);
4573 if (di && !IS_ERR(di)) {
4574 struct btrfs_key di_key;
4576 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4578 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4580 *other_ino = di_key.objectid;
4585 } else if (IS_ERR(di)) {
4589 btrfs_release_path(search_path);
4591 cur_offset += this_len;
4595 btrfs_free_path(search_path);
4600 /* log a single inode in the tree log.
4601 * At least one parent directory for this inode must exist in the tree
4602 * or be logged already.
4604 * Any items from this inode changed by the current transaction are copied
4605 * to the log tree. An extra reference is taken on any extents in this
4606 * file, allowing us to avoid a whole pile of corner cases around logging
4607 * blocks that have been removed from the tree.
4609 * See LOG_INODE_ALL and related defines for a description of what inode_only is used for.
4612 * This handles both files and directories.
4614 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4615 struct btrfs_root *root, struct btrfs_inode *inode,
4619 struct btrfs_log_ctx *ctx)
4621 struct btrfs_fs_info *fs_info = root->fs_info;
4622 struct btrfs_path *path;
4623 struct btrfs_path *dst_path;
4624 struct btrfs_key min_key;
4625 struct btrfs_key max_key;
4626 struct btrfs_root *log = root->log_root;
4627 struct extent_buffer *src = NULL;
4628 LIST_HEAD(logged_list);
4629 u64 last_extent = 0;
4633 int ins_start_slot = 0;
4635 bool fast_search = false;
4636 u64 ino = btrfs_ino(inode);
4637 struct extent_map_tree *em_tree = &inode->extent_tree;
4638 u64 logged_isize = 0;
4639 bool need_log_inode_item = true;
4641 path = btrfs_alloc_path();
4644 dst_path = btrfs_alloc_path();
4646 btrfs_free_path(path);
4650 min_key.objectid = ino;
4651 min_key.type = BTRFS_INODE_ITEM_KEY;
4654 max_key.objectid = ino;
4657 /* today the code can only do partial logging of directories */
4658 if (S_ISDIR(inode->vfs_inode.i_mode) ||
4659 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4660 &inode->runtime_flags) &&
4661 inode_only >= LOG_INODE_EXISTS))
4662 max_key.type = BTRFS_XATTR_ITEM_KEY;
4664 max_key.type = (u8)-1;
4665 max_key.offset = (u64)-1;
4668 * Only run delayed items if we are a dir or a new file.
4669 * Otherwise commit the delayed inode only, which is needed in
4670 * order for the log replay code to mark inodes for link count
4671 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4673 if (S_ISDIR(inode->vfs_inode.i_mode) ||
4674 inode->generation > fs_info->last_trans_committed)
4675 ret = btrfs_commit_inode_delayed_items(trans, inode);
4677 ret = btrfs_commit_inode_delayed_inode(inode);
4680 btrfs_free_path(path);
4681 btrfs_free_path(dst_path);
4685 if (inode_only == LOG_OTHER_INODE) {
4686 inode_only = LOG_INODE_EXISTS;
4687 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
4689 mutex_lock(&inode->log_mutex);
4693 * a brute force approach to making sure we get the most uptodate
4694 * copies of everything.
4696 if (S_ISDIR(inode->vfs_inode.i_mode)) {
4697 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4699 if (inode_only == LOG_INODE_EXISTS)
4700 max_key_type = BTRFS_XATTR_ITEM_KEY;
4701 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4703 if (inode_only == LOG_INODE_EXISTS) {
4705 * Make sure the new inode item we write to the log has
4706 * the same isize as the current one (if it exists).
4707 * This is necessary to prevent data loss after log
4708 * replay, and also to prevent doing a wrong expanding
4709 * truncate - for e.g. create file, write 4K into offset
4710 * 0, fsync, write 4K into offset 4096, add hard link,
4711 * fsync some other file (to sync log), power fail - if
4712 * we use the inode's current i_size, after log replay
4713 * we get an 8Kb file, with the last 4Kb extent as a hole
4714 * (zeroes), as if an expanding truncate happened,
4715 * instead of getting a file of 4Kb only.
4717 err = logged_inode_size(log, inode, path, &logged_isize);
4721 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4722 &inode->runtime_flags)) {
4723 if (inode_only == LOG_INODE_EXISTS) {
4724 max_key.type = BTRFS_XATTR_ITEM_KEY;
4725 ret = drop_objectid_items(trans, log, path, ino,
4728 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4729 &inode->runtime_flags);
4730 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4731 &inode->runtime_flags);
4733 ret = btrfs_truncate_inode_items(trans,
4734 log, &inode->vfs_inode, 0, 0);
4739 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4740 &inode->runtime_flags) ||
4741 inode_only == LOG_INODE_EXISTS) {
4742 if (inode_only == LOG_INODE_ALL)
4744 max_key.type = BTRFS_XATTR_ITEM_KEY;
4745 ret = drop_objectid_items(trans, log, path, ino,
4748 if (inode_only == LOG_INODE_ALL)
4761 ret = btrfs_search_forward(root, &min_key,
4762 path, trans->transid);
4770 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4771 if (min_key.objectid != ino)
4773 if (min_key.type > max_key.type)
4776 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4777 need_log_inode_item = false;
4779 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4780 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4781 inode->generation == trans->transid) {
4784 ret = btrfs_check_ref_name_override(path->nodes[0],
4785 path->slots[0], &min_key, inode,
4790 } else if (ret > 0 && ctx &&
4791 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
4792 struct btrfs_key inode_key;
4793 struct inode *other_inode;
4799 ins_start_slot = path->slots[0];
4801 ret = copy_items(trans, inode, dst_path, path,
4802 &last_extent, ins_start_slot,
4810 btrfs_release_path(path);
4811 inode_key.objectid = other_ino;
4812 inode_key.type = BTRFS_INODE_ITEM_KEY;
4813 inode_key.offset = 0;
4814 other_inode = btrfs_iget(fs_info->sb,
4818 * If the other inode that had a conflicting dir
4819 * entry was deleted in the current transaction,
4820 * we don't need to do more work nor fall back to
4821 * a transaction commit.
4823 if (IS_ERR(other_inode) &&
4824 PTR_ERR(other_inode) == -ENOENT) {
4826 } else if (IS_ERR(other_inode)) {
4827 err = PTR_ERR(other_inode);
4831 * We are safe logging the other inode without
4832 * acquiring its i_mutex as long as we log with
4833 * the LOG_INODE_EXISTS mode. We're safe against
4834 * concurrent renames of the other inode as well
4835 * because during a rename we pin the log and
4836 * update the log with the new name before we unpin it.
4839 err = btrfs_log_inode(trans, root,
4840 BTRFS_I(other_inode),
4841 LOG_OTHER_INODE, 0, LLONG_MAX,
4851 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4852 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4855 ret = copy_items(trans, inode, dst_path, path,
4856 &last_extent, ins_start_slot,
4857 ins_nr, inode_only, logged_isize);
4864 btrfs_release_path(path);
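/*
 * Batch up contiguous leaf items that belong to this inode: the run is
 * tracked with ins_start_slot/ins_nr and flushed into the log tree with
 * a single copy_items() call once it ends.
 */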
4870 src = path->nodes[0];
4871 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4874 } else if (!ins_nr) {
4875 ins_start_slot = path->slots[0];
4880 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4881 ins_start_slot, ins_nr, inode_only,
4889 btrfs_release_path(path);
4893 ins_start_slot = path->slots[0];
4896 nritems = btrfs_header_nritems(path->nodes[0]);
4898 if (path->slots[0] < nritems) {
4899 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4904 ret = copy_items(trans, inode, dst_path, path,
4905 &last_extent, ins_start_slot,
4906 ins_nr, inode_only, logged_isize);
4914 btrfs_release_path(path);
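/*
 * Advance the search key for the next btrfs_search_forward() pass:
 * move past the last key we processed, first within the same key type
 * (by offset), then on to the next type, until the whole key range for
 * this inode has been covered.
 */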
4916 if (min_key.offset < (u64)-1) {
4918 } else if (min_key.type < max_key.type) {
4926 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4927 ins_start_slot, ins_nr, inode_only,
4937 btrfs_release_path(path);
4938 btrfs_release_path(dst_path);
4939 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4942 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4943 btrfs_release_path(path);
4944 btrfs_release_path(dst_path);
4945 err = btrfs_log_trailing_hole(trans, root, inode, path);
4950 btrfs_release_path(path);
4951 btrfs_release_path(dst_path);
4952 if (need_log_inode_item) {
4953 err = log_inode_item(trans, log, dst_path, inode);
4958 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4959 &logged_list, ctx, start, end);
4964 } else if (inode_only == LOG_INODE_ALL) {
4965 struct extent_map *em, *n;
4967 write_lock(&em_tree->lock);
4969 * We can't just remove every em if we're called for a ranged
4970 * fsync - that is, one that doesn't cover the whole possible
4971 * file range (0 to LLONG_MAX). This is because we can have
4972 * em's that fall outside the range we're logging, whose ordered
4973 * operations haven't completed yet
4974 * (btrfs_finish_ordered_io() not invoked yet). This means we
4975 * didn't get their respective file extent item in the fs/subvol
4976 * tree yet, and need to let the next fast fsync (one which
4977 * consults the list of modified extent maps) find the em so
4978 * that it logs a matching file extent item and waits for the
4979 * respective ordered operation to complete (if it's still running).
4982 * Removing every em outside the range we're logging would make
4983 * the next fast fsync not log their matching file extent items,
4984 * therefore making us lose data after a log replay.
4986 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4988 const u64 mod_end = em->mod_start + em->mod_len - 1;
4990 if (em->mod_start >= start && mod_end <= end)
4991 list_del_init(&em->list);
4993 write_unlock(&em_tree->lock);
4996 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
4997 ret = log_directory_changes(trans, root, inode, path, dst_path,
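/*
 * Record that this inode is now in the log for the current transaction,
 * so that a later fsync can notice it through btrfs_inode_in_log() and
 * avoid logging it again.
 */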
5005 spin_lock(&inode->lock);
5006 inode->logged_trans = trans->transid;
5007 inode->last_log_commit = inode->last_sub_trans;
5008 spin_unlock(&inode->lock);
5011 btrfs_put_logged_extents(&logged_list);
5013 btrfs_submit_logged_extents(&logged_list, log);
5014 mutex_unlock(&inode->log_mutex);
5016 btrfs_free_path(path);
5017 btrfs_free_path(dst_path);
5022 * Check if we must fall back to a transaction commit when logging an inode.
5023 * This must be called after logging the inode and is used only in the context
5024 * when fsyncing an inode requires logging some other inode - in which
5025 * case we can't lock the i_mutex of each of those other inodes, as that
5026 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5027 * log inodes up or down in the hierarchy) or rename operations for example. So
5028 * we take the log_mutex of the inode after we have logged it and then check for
5029 * its last_unlink_trans value - this is safe because any task setting
5030 * last_unlink_trans must take the log_mutex and it must do this before it does
5031 * the actual unlink operation, so if we do this check before a concurrent task
5032 * sets last_unlink_trans it means we've logged a consistent version/state of
5033 * all the inode items, otherwise we are not sure and must do a transaction
5034 * commit (the concurrent task might have only updated last_unlink_trans before
5035 * we logged the inode or it might have also done the unlink).
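/*
 * A sketch of the ordering the above relies on, with the setter being
 * btrfs_record_unlink_dir() (which, as documented further below, runs
 * before the actual unlink):
 *
 *    logging task                       unlink task
 *    ------------                       -----------
 *    log the inode's items
 *    mutex_lock(&inode->log_mutex)      mutex_lock(&inode->log_mutex)
 *    read last_unlink_trans             last_unlink_trans = transid
 *    mutex_unlock(&inode->log_mutex)    mutex_unlock(&inode->log_mutex)
 *                                       do the actual unlink
 *
 * If our read happens before the store, the unlink had not started when
 * we logged the inode, so the logged state is consistent; otherwise we
 * see the new value and force the full transaction commit.
 */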
5037 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5038 struct btrfs_inode *inode)
5040 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5043 mutex_lock(&inode->log_mutex);
5044 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5046 * Make sure any commits to the log are forced to be full
5049 btrfs_set_log_full_commit(fs_info, trans);
5052 mutex_unlock(&inode->log_mutex);
5058 * follow the dentry parent pointers up the chain and see if any
5059 * of the directories in it require a full commit before they can
5060 * be logged. Returns zero if nothing special needs to be done or 1 if
5061 * a full commit is required.
5063 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5064 struct btrfs_inode *inode,
5065 struct dentry *parent,
5066 struct super_block *sb,
5070 struct dentry *old_parent = NULL;
5071 struct btrfs_inode *orig_inode = inode;
5074 * for a regular file, if its inode is already on disk, we don't
5075 * have to worry about the parents at all. This is because
5076 * we can use the last_unlink_trans field to record renames
5077 * and other fun in this file.
5079 if (S_ISREG(inode->vfs_inode.i_mode) &&
5080 inode->generation <= last_committed &&
5081 inode->last_unlink_trans <= last_committed)
5084 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5085 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5087 inode = BTRFS_I(d_inode(parent));
5092 * If we are logging a directory then we start with our inode,
5093 * not our parent's inode, so we need to skip setting the
5094 * logged_trans so that further down in the log code we don't
5095 * think this inode has already been logged.
5097 if (inode != orig_inode)
5098 inode->logged_trans = trans->transid;
5101 if (btrfs_must_commit_transaction(trans, inode)) {
5106 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5109 if (IS_ROOT(parent)) {
5110 inode = BTRFS_I(d_inode(parent));
5111 if (btrfs_must_commit_transaction(trans, inode))
5116 parent = dget_parent(parent);
5118 old_parent = parent;
5119 inode = BTRFS_I(d_inode(parent));
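/*
 * One entry in the pending-directory list used by log_new_dir_dentries()
 * below: it holds the inode number of a directory whose new dentries
 * still have to be logged.
 */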
5127 struct btrfs_dir_list {
5129 struct list_head list;
5133 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5134 * details about why it is needed.
5135 * This is a recursive operation - if an existing dentry corresponds to a
5136 * directory, that directory's new entries are logged too (same behaviour as
5137 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5138 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5139 * complains about the following circular lock dependency / possible deadlock:
5143 * lock(&type->i_mutex_dir_key#3/2);
5144 * lock(sb_internal#2);
5145 * lock(&type->i_mutex_dir_key#3/2);
5146 * lock(&sb->s_type->i_mutex_key#14);
5148 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5149 * sb_start_intwrite() in btrfs_start_transaction().
5150 * Not locking i_mutex of the inodes is still safe because:
5152 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5153 * that while logging the inode new references (names) are added or removed
5154 * from the inode, leaving the logged inode item with a link count that does
5155 * not match the number of logged inode reference items. This is fine because
5156 * at log replay time we compute the real number of links and correct the
5157 * link count in the inode item (see replay_one_buffer() and
5158 * link_to_fixup_dir());
5160 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5161 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5162 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5163 * has a size that doesn't match the sum of the lengths of all the logged
5164 * names. This does not result in a problem because if a dir_item key is
5165 * logged but its matching dir_index key is not logged, at log replay time we
5166 * don't use it to replay the respective name (see replay_one_name()). On the
5167 * other hand if only the dir_index key ends up being logged, the respective
5168 * name is added to the fs/subvol tree with both the dir_item and dir_index
5169 * keys created (see replay_one_name()).
5170 * The directory's inode item with a wrong i_size is not a problem as well,
5171 * since we don't use it at log replay time to set the i_size in the inode
5172 * item of the fs/subvol tree (see overwrite_item()).
5174 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5175 struct btrfs_root *root,
5176 struct btrfs_inode *start_inode,
5177 struct btrfs_log_ctx *ctx)
5179 struct btrfs_fs_info *fs_info = root->fs_info;
5180 struct btrfs_root *log = root->log_root;
5181 struct btrfs_path *path;
5182 LIST_HEAD(dir_list);
5183 struct btrfs_dir_list *dir_elem;
5186 path = btrfs_alloc_path();
5190 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5192 btrfs_free_path(path);
5195 dir_elem->ino = btrfs_ino(start_inode);
5196 list_add_tail(&dir_elem->list, &dir_list);
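/*
 * Process dir_list as a FIFO: logging one directory's new dentries can
 * queue further directories whose own new dentries need logging, and
 * those are appended to the tail and handled by later iterations of the
 * loop below.
 */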
5198 while (!list_empty(&dir_list)) {
5199 struct extent_buffer *leaf;
5200 struct btrfs_key min_key;
5204 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5207 goto next_dir_inode;
5209 min_key.objectid = dir_elem->ino;
5210 min_key.type = BTRFS_DIR_ITEM_KEY;
5213 btrfs_release_path(path);
5214 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5216 goto next_dir_inode;
5217 } else if (ret > 0) {
5219 goto next_dir_inode;
5223 leaf = path->nodes[0];
5224 nritems = btrfs_header_nritems(leaf);
5225 for (i = path->slots[0]; i < nritems; i++) {
5226 struct btrfs_dir_item *di;
5227 struct btrfs_key di_key;
5228 struct inode *di_inode;
5229 struct btrfs_dir_list *new_dir_elem;
5230 int log_mode = LOG_INODE_EXISTS;
5233 btrfs_item_key_to_cpu(leaf, &min_key, i);
5234 if (min_key.objectid != dir_elem->ino ||
5235 min_key.type != BTRFS_DIR_ITEM_KEY)
5236 goto next_dir_inode;
5238 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5239 type = btrfs_dir_type(leaf, di);
5240 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5241 type != BTRFS_FT_DIR)
5243 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5244 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5247 btrfs_release_path(path);
5248 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5249 if (IS_ERR(di_inode)) {
5250 ret = PTR_ERR(di_inode);
5251 goto next_dir_inode;
5254 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5259 ctx->log_new_dentries = false;
5260 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5261 log_mode = LOG_INODE_ALL;
5262 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5263 log_mode, 0, LLONG_MAX, ctx);
5265 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5269 goto next_dir_inode;
5270 if (ctx->log_new_dentries) {
5271 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5273 if (!new_dir_elem) {
5275 goto next_dir_inode;
5277 new_dir_elem->ino = di_key.objectid;
5278 list_add_tail(&new_dir_elem->list, &dir_list);
5283 ret = btrfs_next_leaf(log, path);
5285 goto next_dir_inode;
5286 } else if (ret > 0) {
5288 goto next_dir_inode;
5292 if (min_key.offset < (u64)-1) {
5297 list_del(&dir_elem->list);
5301 btrfs_free_path(path);
5305 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5306 struct btrfs_inode *inode,
5307 struct btrfs_log_ctx *ctx)
5309 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
5311 struct btrfs_path *path;
5312 struct btrfs_key key;
5313 struct btrfs_root *root = inode->root;
5314 const u64 ino = btrfs_ino(inode);
5316 path = btrfs_alloc_path();
5319 path->skip_locking = 1;
5320 path->search_commit_root = 1;
5323 key.type = BTRFS_INODE_REF_KEY;
5325 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5330 struct extent_buffer *leaf = path->nodes[0];
5331 int slot = path->slots[0];
5336 if (slot >= btrfs_header_nritems(leaf)) {
5337 ret = btrfs_next_leaf(root, path);
5345 btrfs_item_key_to_cpu(leaf, &key, slot);
5346 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5347 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5350 item_size = btrfs_item_size_nr(leaf, slot);
5351 ptr = btrfs_item_ptr_offset(leaf, slot);
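/*
 * Each ref item can hold one or more names. For an extref item the
 * parent directory's inode number is stored in the item itself; for a
 * regular ref item it is the key offset, which is shared by all the
 * names in the item, so the whole item is consumed in one step.
 */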
5352 while (cur_offset < item_size) {
5353 struct btrfs_key inode_key;
5354 struct inode *dir_inode;
5356 inode_key.type = BTRFS_INODE_ITEM_KEY;
5357 inode_key.offset = 0;
5359 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5360 struct btrfs_inode_extref *extref;
5362 extref = (struct btrfs_inode_extref *)
5364 inode_key.objectid = btrfs_inode_extref_parent(
5366 cur_offset += sizeof(*extref);
5367 cur_offset += btrfs_inode_extref_name_len(leaf,
5370 inode_key.objectid = key.offset;
5371 cur_offset = item_size;
5374 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5376 /* If parent inode was deleted, skip it. */
5377 if (IS_ERR(dir_inode))
5381 ctx->log_new_dentries = false;
5382 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5383 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5385 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5387 if (!ret && ctx && ctx->log_new_dentries)
5388 ret = log_new_dir_dentries(trans, root,
5389 BTRFS_I(dir_inode), ctx);
5398 btrfs_free_path(path);
5403 * helper function around btrfs_log_inode to make sure newly created
5404 * parent directories also end up in the log. Only a minimal (inode item
5405 * and backrefs) logging is done for any parent directories that were
5406 * created after the last committed transaction.
5408 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5409 struct btrfs_root *root,
5410 struct btrfs_inode *inode,
5411 struct dentry *parent,
5415 struct btrfs_log_ctx *ctx)
5417 struct btrfs_fs_info *fs_info = root->fs_info;
5418 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
5419 struct super_block *sb;
5420 struct dentry *old_parent = NULL;
5422 u64 last_committed = fs_info->last_trans_committed;
5423 bool log_dentries = false;
5424 struct btrfs_inode *orig_inode = inode;
5426 sb = inode->vfs_inode.i_sb;
5428 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5434 * If the previous transaction commit didn't complete, we have to do a
5435 * full commit ourselves.
5437 if (fs_info->last_trans_log_full_commit >
5438 fs_info->last_trans_committed) {
5443 if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) {
5448 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
5453 if (btrfs_inode_in_log(inode, trans->transid)) {
5454 ret = BTRFS_NO_LOG_SYNC;
5458 ret = start_log_trans(trans, root, ctx);
5462 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5467 * for a regular file, if its inode is already on disk, we don't
5468 * have to worry about the parents at all. This is because
5469 * we can use the last_unlink_trans field to record renames
5470 * and other fun in this file.
5472 if (S_ISREG(inode->vfs_inode.i_mode) &&
5473 inode->generation <= last_committed &&
5474 inode->last_unlink_trans <= last_committed) {
5479 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
5480 log_dentries = true;
5483 * On unlink we must make sure all our current and old parent directory
5484 * inodes are fully logged. This is to prevent leaving dangling
5485 * directory index entries in directories that were our parents but are
5486 * not anymore. Not doing this results in old parent directory being
5487 * impossible to delete after log replay (rmdir will always fail with
5488 * error -ENOTEMPTY).
5494 * ln testdir/foo testdir/bar
5496 * unlink testdir/bar
5497 * xfs_io -c fsync testdir/foo
5499 * mount fs, triggers log replay
5501 * If we don't log the parent directory (testdir), after log replay the
5502 * directory still has an entry pointing to the file inode using the bar
5503 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5504 * the file inode has a link count of 1.
5510 * ln foo testdir/foo2
5511 * ln foo testdir/foo3
5513 * unlink testdir/foo3
5514 * xfs_io -c fsync foo
5516 * mount fs, triggers log replay
5518 * Similar as the first example, after log replay the parent directory
5519 * testdir still has an entry pointing to the inode file with name foo3
5520 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5521 * and has a link count of 2.
5523 if (inode->last_unlink_trans > last_committed) {
5524 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
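/*
 * Walk up the dentry chain and log, in LOG_INODE_EXISTS mode, every
 * ancestor directory created after the last committed transaction, so
 * that the path leading to the inode can be recreated at log replay time.
 */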
5530 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5533 inode = BTRFS_I(d_inode(parent));
5534 if (root != inode->root)
5537 if (inode->generation > last_committed) {
5538 ret = btrfs_log_inode(trans, root, inode,
5539 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5543 if (IS_ROOT(parent))
5546 parent = dget_parent(parent);
5548 old_parent = parent;
5551 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5557 btrfs_set_log_full_commit(fs_info, trans);
5562 btrfs_remove_log_ctx(root, ctx);
5563 btrfs_end_log_trans(root);
5569 * it is not safe to log the dentry if the chunk root has added new
5570 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5571 * If this returns 1, you must commit the transaction to safely get your data on disk.
5574 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5575 struct btrfs_root *root, struct dentry *dentry,
5578 struct btrfs_log_ctx *ctx)
5580 struct dentry *parent = dget_parent(dentry);
5583 ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
5584 parent, start, end, 0, ctx);
5591 * should be called during mount to recover and replay any log trees
5594 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5597 struct btrfs_path *path;
5598 struct btrfs_trans_handle *trans;
5599 struct btrfs_key key;
5600 struct btrfs_key found_key;
5601 struct btrfs_key tmp_key;
5602 struct btrfs_root *log;
5603 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5604 struct walk_control wc = {
5605 .process_func = process_one_buffer,
5609 path = btrfs_alloc_path();
5613 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5615 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5616 if (IS_ERR(trans)) {
5617 ret = PTR_ERR(trans);
5624 ret = walk_log_tree(trans, log_root_tree, &wc);
5626 btrfs_handle_fs_error(fs_info, ret,
5627 "Failed to pin buffers while recovering log root tree.");
5632 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5633 key.offset = (u64)-1;
5634 key.type = BTRFS_ROOT_ITEM_KEY;
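/*
 * Walk the tree of log roots from the highest key downwards: every
 * ROOT_ITEM under BTRFS_TREE_LOG_OBJECTID is the log tree of one
 * subvolume (found_key.offset being that subvolume's tree id), and is
 * replayed below into its matching fs root (wc.replay_dest).
 */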
5637 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5640 btrfs_handle_fs_error(fs_info, ret,
5641 "Couldn't find tree log root.");
5645 if (path->slots[0] == 0)
5649 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5651 btrfs_release_path(path);
5652 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5655 log = btrfs_read_fs_root(log_root_tree, &found_key);
5658 btrfs_handle_fs_error(fs_info, ret,
5659 "Couldn't read tree log root.");
5663 tmp_key.objectid = found_key.offset;
5664 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5665 tmp_key.offset = (u64)-1;
5667 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5668 if (IS_ERR(wc.replay_dest)) {
5669 ret = PTR_ERR(wc.replay_dest);
5670 free_extent_buffer(log->node);
5671 free_extent_buffer(log->commit_root);
5673 btrfs_handle_fs_error(fs_info, ret,
5674 "Couldn't read target root for tree log recovery.");
5678 wc.replay_dest->log_root = log;
5679 btrfs_record_root_in_trans(trans, wc.replay_dest);
5680 ret = walk_log_tree(trans, log, &wc);
5682 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5683 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5687 key.offset = found_key.offset - 1;
5688 wc.replay_dest->log_root = NULL;
5689 free_extent_buffer(log->node);
5690 free_extent_buffer(log->commit_root);
5696 if (found_key.offset == 0)
5699 btrfs_release_path(path);
5701 /* step one is to pin it all, step two is to replay just inodes */
5704 wc.process_func = replay_one_buffer;
5705 wc.stage = LOG_WALK_REPLAY_INODES;
5708 /* step three is to replay everything */
5709 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5714 btrfs_free_path(path);
5716 /* step four: commit the transaction, which also unpins the blocks */
5717 ret = btrfs_commit_transaction(trans);
5721 free_extent_buffer(log_root_tree->node);
5722 log_root_tree->log_root = NULL;
5723 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5724 kfree(log_root_tree);
5729 btrfs_end_transaction(wc.trans);
5730 btrfs_free_path(path);
5735 * there are some corner cases where we want to force a full
5736 * commit instead of allowing a directory to be logged.
5738 * They revolve around files that were unlinked from the directory, and
5739 * this function updates the parent directory so that a full commit is
5740 * properly done if it is fsync'd later after the unlinks are done.
5742 * Must be called before the unlink operations (updates to the subvolume tree,
5743 * inodes, etc) are done.
5745 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5746 struct btrfs_inode *dir, struct btrfs_inode *inode,
5750 * when we're logging a file, if it hasn't been renamed
5751 * or unlinked, and its inode is fully committed on disk,
5752 * we don't have to worry about walking up the directory chain
5753 * to log its parents.
5755 * So, we use the last_unlink_trans field to put this transid
5756 * into the file. When the file is logged we check it and
5757 * don't log the parents if the file is fully on disk.
5759 mutex_lock(&inode->log_mutex);
5760 inode->last_unlink_trans = trans->transid;
5761 mutex_unlock(&inode->log_mutex);
5764 * if this directory was already logged, any new
5765 * names for this file/dir will get recorded
5768 if (dir->logged_trans == trans->transid)
5772 * if the inode we're about to unlink was logged,
5773 * the log will be properly updated for any new names
5775 if (inode->logged_trans == trans->transid)
5779 * when renaming files across directories, if the directory
5780 * we're unlinking from gets fsync'd later on, there's
5781 * no way to find the destination directory later and fsync it
5782 * properly. So, we have to be conservative and force commits
5783 * so the new name gets discovered.
5788 /* we can safely do the unlink without any special recording */
5792 mutex_lock(&dir->log_mutex);
5793 dir->last_unlink_trans = trans->transid;
5794 mutex_unlock(&dir->log_mutex);
5798 * Make sure that if someone attempts to fsync the parent directory of a deleted
5799 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5800 * that after replaying the log tree of the parent directory's root we will not
5801 * see the snapshot anymore and at log replay time we will not see any log tree
5802 * corresponding to the deleted snapshot's root, which could lead to replaying
5803 * it after replaying the log tree of the parent directory (which would replay
5804 * the snapshot delete operation).
5806 * Must be called before the actual snapshot destroy operation (updates to the
5807 * parent root and tree of tree roots trees, etc) are done.
5809 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5810 struct btrfs_inode *dir)
5812 mutex_lock(&dir->log_mutex);
5813 dir->last_unlink_trans = trans->transid;
5814 mutex_unlock(&dir->log_mutex);
5818 * Call this after adding a new name for a file and it will properly
5819 * update the log to reflect the new name.
5821 * It will return zero if all goes well, and it will return 1 if a
5822 * full transaction commit is required.
5824 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5825 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
5826 struct dentry *parent)
5828 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
5829 struct btrfs_root *root = inode->root;
5832 * this will force the logging code to walk the dentry chain
5835 if (S_ISREG(inode->vfs_inode.i_mode))
5836 inode->last_unlink_trans = trans->transid;
5839 * if this inode hasn't been logged and the directory we're renaming it
5840 * from hasn't been logged, we don't need to log it
5842 if (inode->logged_trans <= fs_info->last_trans_committed &&
5843 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
5846 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5847 LLONG_MAX, 1, NULL);