2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
23 #include <sys/types.h>
27 #include <uuid/uuid.h>
32 #include "print-tree.h"
33 #include "task-utils.h"
34 #include "transaction.h"
37 #include "free-space-cache.h"
38 #include "free-space-tree.h"
40 #include "qgroup-verify.h"
41 #include "rbtree-utils.h"
49 TASK_NOTHING, /* have to be the last element */
54 enum task_position tp;
56 struct task_info *info;
59 static u64 bytes_used = 0;
60 static u64 total_csum_bytes = 0;
61 static u64 total_btree_bytes = 0;
62 static u64 total_fs_tree_bytes = 0;
63 static u64 total_extent_tree_bytes = 0;
64 static u64 btree_space_waste = 0;
65 static u64 data_bytes_allocated = 0;
66 static u64 data_bytes_referenced = 0;
67 static int found_old_backref = 0;
68 static LIST_HEAD(duplicate_extents);
69 static LIST_HEAD(delete_items);
70 static int no_holes = 0;
71 static int init_extent_tree = 0;
72 static int check_data_csum = 0;
73 static struct btrfs_fs_info *global_info;
74 static struct task_ctx ctx = { 0 };
75 static struct cache_tree *roots_info_cache = NULL;
77 enum btrfs_check_mode {
81 CHECK_MODE_DEFAULT = CHECK_MODE_ORIGINAL
84 static enum btrfs_check_mode check_mode = CHECK_MODE_DEFAULT;
86 struct extent_backref {
87 struct list_head list;
88 unsigned int is_data:1;
89 unsigned int found_extent_tree:1;
90 unsigned int full_backref:1;
91 unsigned int found_ref:1;
92 unsigned int broken:1;
95 static inline struct extent_backref* to_extent_backref(struct list_head *entry)
97 return list_entry(entry, struct extent_backref, list);
100 struct data_backref {
101 struct extent_backref node;
115 static inline struct data_backref* to_data_backref(struct extent_backref *back)
117 return container_of(back, struct data_backref, node);
121 * Much like data_backref, just removed the undetermined members
122 * and change it to use list_head.
123 * During extent scan, it is stored in root->orphan_data_extent.
124 * During fs tree scan, it is then moved to inode_rec->orphan_data_extents.
126 struct orphan_data_extent {
127 struct list_head list;
135 struct tree_backref {
136 struct extent_backref node;
143 static inline struct tree_backref* to_tree_backref(struct extent_backref *back)
145 return container_of(back, struct tree_backref, node);
148 /* Explicit initialization for extent_record::flag_block_full_backref */
149 enum { FLAG_UNSET = 2 };
151 struct extent_record {
152 struct list_head backrefs;
153 struct list_head dups;
154 struct list_head list;
155 struct cache_extent cache;
156 struct btrfs_disk_key parent_key;
161 u64 extent_item_refs;
163 u64 parent_generation;
167 unsigned int flag_block_full_backref:2;
168 unsigned int found_rec:1;
169 unsigned int content_checked:1;
170 unsigned int owner_ref_checked:1;
171 unsigned int is_root:1;
172 unsigned int metadata:1;
173 unsigned int bad_full_backref:1;
174 unsigned int crossing_stripes:1;
175 unsigned int wrong_chunk_type:1;
178 static inline struct extent_record* to_extent_record(struct list_head *entry)
180 return container_of(entry, struct extent_record, list);
183 struct inode_backref {
184 struct list_head list;
185 unsigned int found_dir_item:1;
186 unsigned int found_dir_index:1;
187 unsigned int found_inode_ref:1;
188 unsigned int filetype:8;
190 unsigned int ref_type;
197 static inline struct inode_backref* to_inode_backref(struct list_head *entry)
199 return list_entry(entry, struct inode_backref, list);
202 struct root_item_record {
203 struct list_head list;
210 struct btrfs_key drop_key;
213 #define REF_ERR_NO_DIR_ITEM (1 << 0)
214 #define REF_ERR_NO_DIR_INDEX (1 << 1)
215 #define REF_ERR_NO_INODE_REF (1 << 2)
216 #define REF_ERR_DUP_DIR_ITEM (1 << 3)
217 #define REF_ERR_DUP_DIR_INDEX (1 << 4)
218 #define REF_ERR_DUP_INODE_REF (1 << 5)
219 #define REF_ERR_INDEX_UNMATCH (1 << 6)
220 #define REF_ERR_FILETYPE_UNMATCH (1 << 7)
221 #define REF_ERR_NAME_TOO_LONG (1 << 8) // 100
222 #define REF_ERR_NO_ROOT_REF (1 << 9)
223 #define REF_ERR_NO_ROOT_BACKREF (1 << 10)
224 #define REF_ERR_DUP_ROOT_REF (1 << 11)
225 #define REF_ERR_DUP_ROOT_BACKREF (1 << 12)
227 struct file_extent_hole {
233 struct inode_record {
234 struct list_head backrefs;
235 unsigned int checked:1;
236 unsigned int merging:1;
237 unsigned int found_inode_item:1;
238 unsigned int found_dir_item:1;
239 unsigned int found_file_extent:1;
240 unsigned int found_csum_item:1;
241 unsigned int some_csum_missing:1;
242 unsigned int nodatasum:1;
255 struct rb_root holes;
256 struct list_head orphan_extents;
261 #define I_ERR_NO_INODE_ITEM (1 << 0)
262 #define I_ERR_NO_ORPHAN_ITEM (1 << 1)
263 #define I_ERR_DUP_INODE_ITEM (1 << 2)
264 #define I_ERR_DUP_DIR_INDEX (1 << 3)
265 #define I_ERR_ODD_DIR_ITEM (1 << 4)
266 #define I_ERR_ODD_FILE_EXTENT (1 << 5)
267 #define I_ERR_BAD_FILE_EXTENT (1 << 6)
268 #define I_ERR_FILE_EXTENT_OVERLAP (1 << 7)
269 #define I_ERR_FILE_EXTENT_DISCOUNT (1 << 8) // 100
270 #define I_ERR_DIR_ISIZE_WRONG (1 << 9)
271 #define I_ERR_FILE_NBYTES_WRONG (1 << 10) // 400
272 #define I_ERR_ODD_CSUM_ITEM (1 << 11)
273 #define I_ERR_SOME_CSUM_MISSING (1 << 12)
274 #define I_ERR_LINK_COUNT_WRONG (1 << 13)
275 #define I_ERR_FILE_EXTENT_ORPHAN (1 << 14)
277 struct root_backref {
278 struct list_head list;
279 unsigned int found_dir_item:1;
280 unsigned int found_dir_index:1;
281 unsigned int found_back_ref:1;
282 unsigned int found_forward_ref:1;
283 unsigned int reachable:1;
292 static inline struct root_backref* to_root_backref(struct list_head *entry)
294 return list_entry(entry, struct root_backref, list);
298 struct list_head backrefs;
299 struct cache_extent cache;
300 unsigned int found_root_item:1;
306 struct cache_extent cache;
311 struct cache_extent cache;
312 struct cache_tree root_cache;
313 struct cache_tree inode_cache;
314 struct inode_record *current;
323 struct walk_control {
324 struct cache_tree shared;
325 struct shared_node *nodes[BTRFS_MAX_LEVEL];
331 struct btrfs_key key;
333 struct list_head list;
336 struct extent_entry {
341 struct list_head list;
344 struct root_item_info {
345 /* level of the root */
347 /* number of nodes at this level, must be 1 for a root */
351 struct cache_extent cache_extent;
355 * Error bit for low memory mode check.
357 * Currently no caller cares about it yet. Just internal use for error
360 #define BACKREF_MISSING (1 << 0) /* Backref missing in extent tree */
361 #define BACKREF_MISMATCH (1 << 1) /* Backref exists but does not match */
362 #define BYTES_UNALIGNED (1 << 2) /* Some bytes are not aligned */
363 #define REFERENCER_MISSING (1 << 3) /* Referencer not found */
364 #define REFERENCER_MISMATCH (1 << 4) /* Referenceer found but does not match */
365 #define CROSSING_STRIPE_BOUNDARY (1 << 4) /* For kernel scrub workaround */
366 #define ITEM_SIZE_MISMATCH (1 << 5) /* Bad item size */
367 #define UNKNOWN_TYPE (1 << 6) /* Unknown type */
368 #define ACCOUNTING_MISMATCH (1 << 7) /* Used space accounting error */
369 #define CHUNK_TYPE_MISMATCH (1 << 8)
371 static void *print_status_check(void *p)
373 struct task_ctx *priv = p;
374 const char work_indicator[] = { '.', 'o', 'O', 'o' };
376 static char *task_position_string[] = {
378 "checking free space cache",
382 task_period_start(priv->info, 1000 /* 1s */);
384 if (priv->tp == TASK_NOTHING)
388 printf("%s [%c]\r", task_position_string[priv->tp],
389 work_indicator[count % 4]);
392 task_period_wait(priv->info);
397 static int print_status_return(void *p)
405 static enum btrfs_check_mode parse_check_mode(const char *str)
407 if (strcmp(str, "lowmem") == 0)
408 return CHECK_MODE_LOWMEM;
409 if (strcmp(str, "orig") == 0)
410 return CHECK_MODE_ORIGINAL;
411 if (strcmp(str, "original") == 0)
412 return CHECK_MODE_ORIGINAL;
414 return CHECK_MODE_UNKNOWN;
417 /* Compatible function to allow reuse of old codes */
418 static u64 first_extent_gap(struct rb_root *holes)
420 struct file_extent_hole *hole;
422 if (RB_EMPTY_ROOT(holes))
425 hole = rb_entry(rb_first(holes), struct file_extent_hole, node);
429 static int compare_hole(struct rb_node *node1, struct rb_node *node2)
431 struct file_extent_hole *hole1;
432 struct file_extent_hole *hole2;
434 hole1 = rb_entry(node1, struct file_extent_hole, node);
435 hole2 = rb_entry(node2, struct file_extent_hole, node);
437 if (hole1->start > hole2->start)
439 if (hole1->start < hole2->start)
441 /* Now hole1->start == hole2->start */
442 if (hole1->len >= hole2->len)
444 * Hole 1 will be merge center
445 * Same hole will be merged later
448 /* Hole 2 will be merge center */
453 * Add a hole to the record
455 * This will do hole merge for copy_file_extent_holes(),
456 * which will ensure there won't be continuous holes.
458 static int add_file_extent_hole(struct rb_root *holes,
461 struct file_extent_hole *hole;
462 struct file_extent_hole *prev = NULL;
463 struct file_extent_hole *next = NULL;
465 hole = malloc(sizeof(*hole));
470 /* Since compare will not return 0, no -EEXIST will happen */
471 rb_insert(holes, &hole->node, compare_hole);
473 /* simple merge with previous hole */
474 if (rb_prev(&hole->node))
475 prev = rb_entry(rb_prev(&hole->node), struct file_extent_hole,
477 if (prev && prev->start + prev->len >= hole->start) {
478 hole->len = hole->start + hole->len - prev->start;
479 hole->start = prev->start;
480 rb_erase(&prev->node, holes);
485 /* iterate merge with next holes */
487 if (!rb_next(&hole->node))
489 next = rb_entry(rb_next(&hole->node), struct file_extent_hole,
491 if (hole->start + hole->len >= next->start) {
492 if (hole->start + hole->len <= next->start + next->len)
493 hole->len = next->start + next->len -
495 rb_erase(&next->node, holes);
504 static int compare_hole_range(struct rb_node *node, void *data)
506 struct file_extent_hole *hole;
509 hole = (struct file_extent_hole *)data;
512 hole = rb_entry(node, struct file_extent_hole, node);
513 if (start < hole->start)
515 if (start >= hole->start && start < hole->start + hole->len)
521 * Delete a hole in the record
523 * This will do the hole split and is much restrict than add.
525 static int del_file_extent_hole(struct rb_root *holes,
528 struct file_extent_hole *hole;
529 struct file_extent_hole tmp;
534 struct rb_node *node;
541 node = rb_search(holes, &tmp, compare_hole_range, NULL);
544 hole = rb_entry(node, struct file_extent_hole, node);
545 if (start + len > hole->start + hole->len)
549 * Now there will be no overlap, delete the hole and re-add the
550 * split(s) if they exists.
552 if (start > hole->start) {
553 prev_start = hole->start;
554 prev_len = start - hole->start;
557 if (hole->start + hole->len > start + len) {
558 next_start = start + len;
559 next_len = hole->start + hole->len - start - len;
562 rb_erase(node, holes);
565 ret = add_file_extent_hole(holes, prev_start, prev_len);
570 ret = add_file_extent_hole(holes, next_start, next_len);
577 static int copy_file_extent_holes(struct rb_root *dst,
580 struct file_extent_hole *hole;
581 struct rb_node *node;
584 node = rb_first(src);
586 hole = rb_entry(node, struct file_extent_hole, node);
587 ret = add_file_extent_hole(dst, hole->start, hole->len);
590 node = rb_next(node);
595 static void free_file_extent_holes(struct rb_root *holes)
597 struct rb_node *node;
598 struct file_extent_hole *hole;
600 node = rb_first(holes);
602 hole = rb_entry(node, struct file_extent_hole, node);
603 rb_erase(node, holes);
605 node = rb_first(holes);
609 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info);
611 static void record_root_in_trans(struct btrfs_trans_handle *trans,
612 struct btrfs_root *root)
614 if (root->last_trans != trans->transid) {
615 root->track_dirty = 1;
616 root->last_trans = trans->transid;
617 root->commit_root = root->node;
618 extent_buffer_get(root->node);
622 static u8 imode_to_type(u32 imode)
625 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
626 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
627 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
628 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
629 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
630 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
631 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
632 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
635 return btrfs_type_by_mode[(imode & S_IFMT) >> S_SHIFT];
639 static int device_record_compare(struct rb_node *node1, struct rb_node *node2)
641 struct device_record *rec1;
642 struct device_record *rec2;
644 rec1 = rb_entry(node1, struct device_record, node);
645 rec2 = rb_entry(node2, struct device_record, node);
646 if (rec1->devid > rec2->devid)
648 else if (rec1->devid < rec2->devid)
654 static struct inode_record *clone_inode_rec(struct inode_record *orig_rec)
656 struct inode_record *rec;
657 struct inode_backref *backref;
658 struct inode_backref *orig;
659 struct inode_backref *tmp;
660 struct orphan_data_extent *src_orphan;
661 struct orphan_data_extent *dst_orphan;
665 rec = malloc(sizeof(*rec));
667 return ERR_PTR(-ENOMEM);
668 memcpy(rec, orig_rec, sizeof(*rec));
670 INIT_LIST_HEAD(&rec->backrefs);
671 INIT_LIST_HEAD(&rec->orphan_extents);
672 rec->holes = RB_ROOT;
674 list_for_each_entry(orig, &orig_rec->backrefs, list) {
675 size = sizeof(*orig) + orig->namelen + 1;
676 backref = malloc(size);
681 memcpy(backref, orig, size);
682 list_add_tail(&backref->list, &rec->backrefs);
684 list_for_each_entry(src_orphan, &orig_rec->orphan_extents, list) {
685 dst_orphan = malloc(sizeof(*dst_orphan));
690 memcpy(dst_orphan, src_orphan, sizeof(*src_orphan));
691 list_add_tail(&dst_orphan->list, &rec->orphan_extents);
693 ret = copy_file_extent_holes(&rec->holes, &orig_rec->holes);
699 if (!list_empty(&rec->backrefs))
700 list_for_each_entry_safe(orig, tmp, &rec->backrefs, list) {
701 list_del(&orig->list);
705 if (!list_empty(&rec->orphan_extents))
706 list_for_each_entry_safe(orig, tmp, &rec->orphan_extents, list) {
707 list_del(&orig->list);
716 static void print_orphan_data_extents(struct list_head *orphan_extents,
719 struct orphan_data_extent *orphan;
721 if (list_empty(orphan_extents))
723 printf("The following data extent is lost in tree %llu:\n",
725 list_for_each_entry(orphan, orphan_extents, list) {
726 printf("\tinode: %llu, offset:%llu, disk_bytenr: %llu, disk_len: %llu\n",
727 orphan->objectid, orphan->offset, orphan->disk_bytenr,
732 static void print_inode_error(struct btrfs_root *root, struct inode_record *rec)
734 u64 root_objectid = root->root_key.objectid;
735 int errors = rec->errors;
739 /* reloc root errors, we print its corresponding fs root objectid*/
740 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
741 root_objectid = root->root_key.offset;
742 fprintf(stderr, "reloc");
744 fprintf(stderr, "root %llu inode %llu errors %x",
745 (unsigned long long) root_objectid,
746 (unsigned long long) rec->ino, rec->errors);
748 if (errors & I_ERR_NO_INODE_ITEM)
749 fprintf(stderr, ", no inode item");
750 if (errors & I_ERR_NO_ORPHAN_ITEM)
751 fprintf(stderr, ", no orphan item");
752 if (errors & I_ERR_DUP_INODE_ITEM)
753 fprintf(stderr, ", dup inode item");
754 if (errors & I_ERR_DUP_DIR_INDEX)
755 fprintf(stderr, ", dup dir index");
756 if (errors & I_ERR_ODD_DIR_ITEM)
757 fprintf(stderr, ", odd dir item");
758 if (errors & I_ERR_ODD_FILE_EXTENT)
759 fprintf(stderr, ", odd file extent");
760 if (errors & I_ERR_BAD_FILE_EXTENT)
761 fprintf(stderr, ", bad file extent");
762 if (errors & I_ERR_FILE_EXTENT_OVERLAP)
763 fprintf(stderr, ", file extent overlap");
764 if (errors & I_ERR_FILE_EXTENT_DISCOUNT)
765 fprintf(stderr, ", file extent discount");
766 if (errors & I_ERR_DIR_ISIZE_WRONG)
767 fprintf(stderr, ", dir isize wrong");
768 if (errors & I_ERR_FILE_NBYTES_WRONG)
769 fprintf(stderr, ", nbytes wrong");
770 if (errors & I_ERR_ODD_CSUM_ITEM)
771 fprintf(stderr, ", odd csum item");
772 if (errors & I_ERR_SOME_CSUM_MISSING)
773 fprintf(stderr, ", some csum missing");
774 if (errors & I_ERR_LINK_COUNT_WRONG)
775 fprintf(stderr, ", link count wrong");
776 if (errors & I_ERR_FILE_EXTENT_ORPHAN)
777 fprintf(stderr, ", orphan file extent");
778 fprintf(stderr, "\n");
779 /* Print the orphan extents if needed */
780 if (errors & I_ERR_FILE_EXTENT_ORPHAN)
781 print_orphan_data_extents(&rec->orphan_extents, root->objectid);
783 /* Print the holes if needed */
784 if (errors & I_ERR_FILE_EXTENT_DISCOUNT) {
785 struct file_extent_hole *hole;
786 struct rb_node *node;
789 node = rb_first(&rec->holes);
790 fprintf(stderr, "Found file extent holes:\n");
793 hole = rb_entry(node, struct file_extent_hole, node);
794 fprintf(stderr, "\tstart: %llu, len: %llu\n",
795 hole->start, hole->len);
796 node = rb_next(node);
799 fprintf(stderr, "\tstart: 0, len: %llu\n",
800 round_up(rec->isize, root->sectorsize));
804 static void print_ref_error(int errors)
806 if (errors & REF_ERR_NO_DIR_ITEM)
807 fprintf(stderr, ", no dir item");
808 if (errors & REF_ERR_NO_DIR_INDEX)
809 fprintf(stderr, ", no dir index");
810 if (errors & REF_ERR_NO_INODE_REF)
811 fprintf(stderr, ", no inode ref");
812 if (errors & REF_ERR_DUP_DIR_ITEM)
813 fprintf(stderr, ", dup dir item");
814 if (errors & REF_ERR_DUP_DIR_INDEX)
815 fprintf(stderr, ", dup dir index");
816 if (errors & REF_ERR_DUP_INODE_REF)
817 fprintf(stderr, ", dup inode ref");
818 if (errors & REF_ERR_INDEX_UNMATCH)
819 fprintf(stderr, ", index mismatch");
820 if (errors & REF_ERR_FILETYPE_UNMATCH)
821 fprintf(stderr, ", filetype mismatch");
822 if (errors & REF_ERR_NAME_TOO_LONG)
823 fprintf(stderr, ", name too long");
824 if (errors & REF_ERR_NO_ROOT_REF)
825 fprintf(stderr, ", no root ref");
826 if (errors & REF_ERR_NO_ROOT_BACKREF)
827 fprintf(stderr, ", no root backref");
828 if (errors & REF_ERR_DUP_ROOT_REF)
829 fprintf(stderr, ", dup root ref");
830 if (errors & REF_ERR_DUP_ROOT_BACKREF)
831 fprintf(stderr, ", dup root backref");
832 fprintf(stderr, "\n");
835 static struct inode_record *get_inode_rec(struct cache_tree *inode_cache,
838 struct ptr_node *node;
839 struct cache_extent *cache;
840 struct inode_record *rec = NULL;
843 cache = lookup_cache_extent(inode_cache, ino, 1);
845 node = container_of(cache, struct ptr_node, cache);
847 if (mod && rec->refs > 1) {
848 node->data = clone_inode_rec(rec);
849 if (IS_ERR(node->data))
855 rec = calloc(1, sizeof(*rec));
857 return ERR_PTR(-ENOMEM);
859 rec->extent_start = (u64)-1;
861 INIT_LIST_HEAD(&rec->backrefs);
862 INIT_LIST_HEAD(&rec->orphan_extents);
863 rec->holes = RB_ROOT;
865 node = malloc(sizeof(*node));
868 return ERR_PTR(-ENOMEM);
870 node->cache.start = ino;
871 node->cache.size = 1;
874 if (ino == BTRFS_FREE_INO_OBJECTID)
877 ret = insert_cache_extent(inode_cache, &node->cache);
879 return ERR_PTR(-EEXIST);
884 static void free_orphan_data_extents(struct list_head *orphan_extents)
886 struct orphan_data_extent *orphan;
888 while (!list_empty(orphan_extents)) {
889 orphan = list_entry(orphan_extents->next,
890 struct orphan_data_extent, list);
891 list_del(&orphan->list);
896 static void free_inode_rec(struct inode_record *rec)
898 struct inode_backref *backref;
903 while (!list_empty(&rec->backrefs)) {
904 backref = to_inode_backref(rec->backrefs.next);
905 list_del(&backref->list);
908 free_orphan_data_extents(&rec->orphan_extents);
909 free_file_extent_holes(&rec->holes);
913 static int can_free_inode_rec(struct inode_record *rec)
915 if (!rec->errors && rec->checked && rec->found_inode_item &&
916 rec->nlink == rec->found_link && list_empty(&rec->backrefs))
921 static void maybe_free_inode_rec(struct cache_tree *inode_cache,
922 struct inode_record *rec)
924 struct cache_extent *cache;
925 struct inode_backref *tmp, *backref;
926 struct ptr_node *node;
927 unsigned char filetype;
929 if (!rec->found_inode_item)
932 filetype = imode_to_type(rec->imode);
933 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
934 if (backref->found_dir_item && backref->found_dir_index) {
935 if (backref->filetype != filetype)
936 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
937 if (!backref->errors && backref->found_inode_ref &&
938 rec->nlink == rec->found_link) {
939 list_del(&backref->list);
945 if (!rec->checked || rec->merging)
948 if (S_ISDIR(rec->imode)) {
949 if (rec->found_size != rec->isize)
950 rec->errors |= I_ERR_DIR_ISIZE_WRONG;
951 if (rec->found_file_extent)
952 rec->errors |= I_ERR_ODD_FILE_EXTENT;
953 } else if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
954 if (rec->found_dir_item)
955 rec->errors |= I_ERR_ODD_DIR_ITEM;
956 if (rec->found_size != rec->nbytes)
957 rec->errors |= I_ERR_FILE_NBYTES_WRONG;
958 if (rec->nlink > 0 && !no_holes &&
959 (rec->extent_end < rec->isize ||
960 first_extent_gap(&rec->holes) < rec->isize))
961 rec->errors |= I_ERR_FILE_EXTENT_DISCOUNT;
964 if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
965 if (rec->found_csum_item && rec->nodatasum)
966 rec->errors |= I_ERR_ODD_CSUM_ITEM;
967 if (rec->some_csum_missing && !rec->nodatasum)
968 rec->errors |= I_ERR_SOME_CSUM_MISSING;
971 BUG_ON(rec->refs != 1);
972 if (can_free_inode_rec(rec)) {
973 cache = lookup_cache_extent(inode_cache, rec->ino, 1);
974 node = container_of(cache, struct ptr_node, cache);
975 BUG_ON(node->data != rec);
976 remove_cache_extent(inode_cache, &node->cache);
982 static int check_orphan_item(struct btrfs_root *root, u64 ino)
984 struct btrfs_path path;
985 struct btrfs_key key;
988 key.objectid = BTRFS_ORPHAN_OBJECTID;
989 key.type = BTRFS_ORPHAN_ITEM_KEY;
992 btrfs_init_path(&path);
993 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
994 btrfs_release_path(&path);
1000 static int process_inode_item(struct extent_buffer *eb,
1001 int slot, struct btrfs_key *key,
1002 struct shared_node *active_node)
1004 struct inode_record *rec;
1005 struct btrfs_inode_item *item;
1007 rec = active_node->current;
1008 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
1009 if (rec->found_inode_item) {
1010 rec->errors |= I_ERR_DUP_INODE_ITEM;
1013 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
1014 rec->nlink = btrfs_inode_nlink(eb, item);
1015 rec->isize = btrfs_inode_size(eb, item);
1016 rec->nbytes = btrfs_inode_nbytes(eb, item);
1017 rec->imode = btrfs_inode_mode(eb, item);
1018 if (btrfs_inode_flags(eb, item) & BTRFS_INODE_NODATASUM)
1020 rec->found_inode_item = 1;
1021 if (rec->nlink == 0)
1022 rec->errors |= I_ERR_NO_ORPHAN_ITEM;
1023 maybe_free_inode_rec(&active_node->inode_cache, rec);
1027 static struct inode_backref *get_inode_backref(struct inode_record *rec,
1029 int namelen, u64 dir)
1031 struct inode_backref *backref;
1033 list_for_each_entry(backref, &rec->backrefs, list) {
1034 if (rec->ino == BTRFS_MULTIPLE_OBJECTIDS)
1036 if (backref->dir != dir || backref->namelen != namelen)
1038 if (memcmp(name, backref->name, namelen))
1043 backref = malloc(sizeof(*backref) + namelen + 1);
1046 memset(backref, 0, sizeof(*backref));
1048 backref->namelen = namelen;
1049 memcpy(backref->name, name, namelen);
1050 backref->name[namelen] = '\0';
1051 list_add_tail(&backref->list, &rec->backrefs);
1055 static int add_inode_backref(struct cache_tree *inode_cache,
1056 u64 ino, u64 dir, u64 index,
1057 const char *name, int namelen,
1058 int filetype, int itemtype, int errors)
1060 struct inode_record *rec;
1061 struct inode_backref *backref;
1063 rec = get_inode_rec(inode_cache, ino, 1);
1064 BUG_ON(IS_ERR(rec));
1065 backref = get_inode_backref(rec, name, namelen, dir);
1068 backref->errors |= errors;
1069 if (itemtype == BTRFS_DIR_INDEX_KEY) {
1070 if (backref->found_dir_index)
1071 backref->errors |= REF_ERR_DUP_DIR_INDEX;
1072 if (backref->found_inode_ref && backref->index != index)
1073 backref->errors |= REF_ERR_INDEX_UNMATCH;
1074 if (backref->found_dir_item && backref->filetype != filetype)
1075 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
1077 backref->index = index;
1078 backref->filetype = filetype;
1079 backref->found_dir_index = 1;
1080 } else if (itemtype == BTRFS_DIR_ITEM_KEY) {
1082 if (backref->found_dir_item)
1083 backref->errors |= REF_ERR_DUP_DIR_ITEM;
1084 if (backref->found_dir_index && backref->filetype != filetype)
1085 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
1087 backref->filetype = filetype;
1088 backref->found_dir_item = 1;
1089 } else if ((itemtype == BTRFS_INODE_REF_KEY) ||
1090 (itemtype == BTRFS_INODE_EXTREF_KEY)) {
1091 if (backref->found_inode_ref)
1092 backref->errors |= REF_ERR_DUP_INODE_REF;
1093 if (backref->found_dir_index && backref->index != index)
1094 backref->errors |= REF_ERR_INDEX_UNMATCH;
1096 backref->index = index;
1098 backref->ref_type = itemtype;
1099 backref->found_inode_ref = 1;
1104 maybe_free_inode_rec(inode_cache, rec);
1108 static int merge_inode_recs(struct inode_record *src, struct inode_record *dst,
1109 struct cache_tree *dst_cache)
1111 struct inode_backref *backref;
1116 list_for_each_entry(backref, &src->backrefs, list) {
1117 if (backref->found_dir_index) {
1118 add_inode_backref(dst_cache, dst->ino, backref->dir,
1119 backref->index, backref->name,
1120 backref->namelen, backref->filetype,
1121 BTRFS_DIR_INDEX_KEY, backref->errors);
1123 if (backref->found_dir_item) {
1125 add_inode_backref(dst_cache, dst->ino,
1126 backref->dir, 0, backref->name,
1127 backref->namelen, backref->filetype,
1128 BTRFS_DIR_ITEM_KEY, backref->errors);
1130 if (backref->found_inode_ref) {
1131 add_inode_backref(dst_cache, dst->ino,
1132 backref->dir, backref->index,
1133 backref->name, backref->namelen, 0,
1134 backref->ref_type, backref->errors);
1138 if (src->found_dir_item)
1139 dst->found_dir_item = 1;
1140 if (src->found_file_extent)
1141 dst->found_file_extent = 1;
1142 if (src->found_csum_item)
1143 dst->found_csum_item = 1;
1144 if (src->some_csum_missing)
1145 dst->some_csum_missing = 1;
1146 if (first_extent_gap(&dst->holes) > first_extent_gap(&src->holes)) {
1147 ret = copy_file_extent_holes(&dst->holes, &src->holes);
1152 BUG_ON(src->found_link < dir_count);
1153 dst->found_link += src->found_link - dir_count;
1154 dst->found_size += src->found_size;
1155 if (src->extent_start != (u64)-1) {
1156 if (dst->extent_start == (u64)-1) {
1157 dst->extent_start = src->extent_start;
1158 dst->extent_end = src->extent_end;
1160 if (dst->extent_end > src->extent_start)
1161 dst->errors |= I_ERR_FILE_EXTENT_OVERLAP;
1162 else if (dst->extent_end < src->extent_start) {
1163 ret = add_file_extent_hole(&dst->holes,
1165 src->extent_start - dst->extent_end);
1167 if (dst->extent_end < src->extent_end)
1168 dst->extent_end = src->extent_end;
1172 dst->errors |= src->errors;
1173 if (src->found_inode_item) {
1174 if (!dst->found_inode_item) {
1175 dst->nlink = src->nlink;
1176 dst->isize = src->isize;
1177 dst->nbytes = src->nbytes;
1178 dst->imode = src->imode;
1179 dst->nodatasum = src->nodatasum;
1180 dst->found_inode_item = 1;
1182 dst->errors |= I_ERR_DUP_INODE_ITEM;
1190 static int splice_shared_node(struct shared_node *src_node,
1191 struct shared_node *dst_node)
1193 struct cache_extent *cache;
1194 struct ptr_node *node, *ins;
1195 struct cache_tree *src, *dst;
1196 struct inode_record *rec, *conflict;
1197 u64 current_ino = 0;
1201 if (--src_node->refs == 0)
1203 if (src_node->current)
1204 current_ino = src_node->current->ino;
1206 src = &src_node->root_cache;
1207 dst = &dst_node->root_cache;
1209 cache = search_cache_extent(src, 0);
1211 node = container_of(cache, struct ptr_node, cache);
1213 cache = next_cache_extent(cache);
1216 remove_cache_extent(src, &node->cache);
1219 ins = malloc(sizeof(*ins));
1221 ins->cache.start = node->cache.start;
1222 ins->cache.size = node->cache.size;
1226 ret = insert_cache_extent(dst, &ins->cache);
1227 if (ret == -EEXIST) {
1228 conflict = get_inode_rec(dst, rec->ino, 1);
1229 BUG_ON(IS_ERR(conflict));
1230 merge_inode_recs(rec, conflict, dst);
1232 conflict->checked = 1;
1233 if (dst_node->current == conflict)
1234 dst_node->current = NULL;
1236 maybe_free_inode_rec(dst, conflict);
1237 free_inode_rec(rec);
1244 if (src == &src_node->root_cache) {
1245 src = &src_node->inode_cache;
1246 dst = &dst_node->inode_cache;
1250 if (current_ino > 0 && (!dst_node->current ||
1251 current_ino > dst_node->current->ino)) {
1252 if (dst_node->current) {
1253 dst_node->current->checked = 1;
1254 maybe_free_inode_rec(dst, dst_node->current);
1256 dst_node->current = get_inode_rec(dst, current_ino, 1);
1257 BUG_ON(IS_ERR(dst_node->current));
1262 static void free_inode_ptr(struct cache_extent *cache)
1264 struct ptr_node *node;
1265 struct inode_record *rec;
1267 node = container_of(cache, struct ptr_node, cache);
1269 free_inode_rec(rec);
1273 FREE_EXTENT_CACHE_BASED_TREE(inode_recs, free_inode_ptr);
1275 static struct shared_node *find_shared_node(struct cache_tree *shared,
1278 struct cache_extent *cache;
1279 struct shared_node *node;
1281 cache = lookup_cache_extent(shared, bytenr, 1);
1283 node = container_of(cache, struct shared_node, cache);
1289 static int add_shared_node(struct cache_tree *shared, u64 bytenr, u32 refs)
1292 struct shared_node *node;
1294 node = calloc(1, sizeof(*node));
1297 node->cache.start = bytenr;
1298 node->cache.size = 1;
1299 cache_tree_init(&node->root_cache);
1300 cache_tree_init(&node->inode_cache);
1303 ret = insert_cache_extent(shared, &node->cache);
1308 static int enter_shared_node(struct btrfs_root *root, u64 bytenr, u32 refs,
1309 struct walk_control *wc, int level)
1311 struct shared_node *node;
1312 struct shared_node *dest;
1315 if (level == wc->active_node)
1318 BUG_ON(wc->active_node <= level);
1319 node = find_shared_node(&wc->shared, bytenr);
1321 ret = add_shared_node(&wc->shared, bytenr, refs);
1323 node = find_shared_node(&wc->shared, bytenr);
1324 wc->nodes[level] = node;
1325 wc->active_node = level;
1329 if (wc->root_level == wc->active_node &&
1330 btrfs_root_refs(&root->root_item) == 0) {
1331 if (--node->refs == 0) {
1332 free_inode_recs_tree(&node->root_cache);
1333 free_inode_recs_tree(&node->inode_cache);
1334 remove_cache_extent(&wc->shared, &node->cache);
1340 dest = wc->nodes[wc->active_node];
1341 splice_shared_node(node, dest);
1342 if (node->refs == 0) {
1343 remove_cache_extent(&wc->shared, &node->cache);
1349 static int leave_shared_node(struct btrfs_root *root,
1350 struct walk_control *wc, int level)
1352 struct shared_node *node;
1353 struct shared_node *dest;
1356 if (level == wc->root_level)
1359 for (i = level + 1; i < BTRFS_MAX_LEVEL; i++) {
1363 BUG_ON(i >= BTRFS_MAX_LEVEL);
1365 node = wc->nodes[wc->active_node];
1366 wc->nodes[wc->active_node] = NULL;
1367 wc->active_node = i;
1369 dest = wc->nodes[wc->active_node];
1370 if (wc->active_node < wc->root_level ||
1371 btrfs_root_refs(&root->root_item) > 0) {
1372 BUG_ON(node->refs <= 1);
1373 splice_shared_node(node, dest);
1375 BUG_ON(node->refs < 2);
1384 * 1 - if the root with id child_root_id is a child of root parent_root_id
1385 * 0 - if the root child_root_id isn't a child of the root parent_root_id but
1386 * has other root(s) as parent(s)
1387 * 2 - if the root child_root_id doesn't have any parent roots
1389 static int is_child_root(struct btrfs_root *root, u64 parent_root_id,
1392 struct btrfs_path path;
1393 struct btrfs_key key;
1394 struct extent_buffer *leaf;
1398 btrfs_init_path(&path);
1400 key.objectid = parent_root_id;
1401 key.type = BTRFS_ROOT_REF_KEY;
1402 key.offset = child_root_id;
1403 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
1407 btrfs_release_path(&path);
1411 key.objectid = child_root_id;
1412 key.type = BTRFS_ROOT_BACKREF_KEY;
1414 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
1420 leaf = path.nodes[0];
1421 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1422 ret = btrfs_next_leaf(root->fs_info->tree_root, &path);
1425 leaf = path.nodes[0];
1428 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1429 if (key.objectid != child_root_id ||
1430 key.type != BTRFS_ROOT_BACKREF_KEY)
1435 if (key.offset == parent_root_id) {
1436 btrfs_release_path(&path);
1443 btrfs_release_path(&path);
1446 return has_parent ? 0 : 2;
1449 static int process_dir_item(struct btrfs_root *root,
1450 struct extent_buffer *eb,
1451 int slot, struct btrfs_key *key,
1452 struct shared_node *active_node)
1462 struct btrfs_dir_item *di;
1463 struct inode_record *rec;
1464 struct cache_tree *root_cache;
1465 struct cache_tree *inode_cache;
1466 struct btrfs_key location;
1467 char namebuf[BTRFS_NAME_LEN];
1469 root_cache = &active_node->root_cache;
1470 inode_cache = &active_node->inode_cache;
1471 rec = active_node->current;
1472 rec->found_dir_item = 1;
1474 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1475 total = btrfs_item_size_nr(eb, slot);
1476 while (cur < total) {
1478 btrfs_dir_item_key_to_cpu(eb, di, &location);
1479 name_len = btrfs_dir_name_len(eb, di);
1480 data_len = btrfs_dir_data_len(eb, di);
1481 filetype = btrfs_dir_type(eb, di);
1483 rec->found_size += name_len;
1484 if (name_len <= BTRFS_NAME_LEN) {
1488 len = BTRFS_NAME_LEN;
1489 error = REF_ERR_NAME_TOO_LONG;
1491 read_extent_buffer(eb, namebuf, (unsigned long)(di + 1), len);
1493 if (location.type == BTRFS_INODE_ITEM_KEY) {
1494 add_inode_backref(inode_cache, location.objectid,
1495 key->objectid, key->offset, namebuf,
1496 len, filetype, key->type, error);
1497 } else if (location.type == BTRFS_ROOT_ITEM_KEY) {
1498 add_inode_backref(root_cache, location.objectid,
1499 key->objectid, key->offset,
1500 namebuf, len, filetype,
1503 fprintf(stderr, "invalid location in dir item %u\n",
1505 add_inode_backref(inode_cache, BTRFS_MULTIPLE_OBJECTIDS,
1506 key->objectid, key->offset, namebuf,
1507 len, filetype, key->type, error);
1510 len = sizeof(*di) + name_len + data_len;
1511 di = (struct btrfs_dir_item *)((char *)di + len);
1514 if (key->type == BTRFS_DIR_INDEX_KEY && nritems > 1)
1515 rec->errors |= I_ERR_DUP_DIR_INDEX;
1520 static int process_inode_ref(struct extent_buffer *eb,
1521 int slot, struct btrfs_key *key,
1522 struct shared_node *active_node)
1530 struct cache_tree *inode_cache;
1531 struct btrfs_inode_ref *ref;
1532 char namebuf[BTRFS_NAME_LEN];
1534 inode_cache = &active_node->inode_cache;
1536 ref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1537 total = btrfs_item_size_nr(eb, slot);
1538 while (cur < total) {
1539 name_len = btrfs_inode_ref_name_len(eb, ref);
1540 index = btrfs_inode_ref_index(eb, ref);
1541 if (name_len <= BTRFS_NAME_LEN) {
1545 len = BTRFS_NAME_LEN;
1546 error = REF_ERR_NAME_TOO_LONG;
1548 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
1549 add_inode_backref(inode_cache, key->objectid, key->offset,
1550 index, namebuf, len, 0, key->type, error);
1552 len = sizeof(*ref) + name_len;
1553 ref = (struct btrfs_inode_ref *)((char *)ref + len);
1559 static int process_inode_extref(struct extent_buffer *eb,
1560 int slot, struct btrfs_key *key,
1561 struct shared_node *active_node)
1570 struct cache_tree *inode_cache;
1571 struct btrfs_inode_extref *extref;
1572 char namebuf[BTRFS_NAME_LEN];
1574 inode_cache = &active_node->inode_cache;
1576 extref = btrfs_item_ptr(eb, slot, struct btrfs_inode_extref);
1577 total = btrfs_item_size_nr(eb, slot);
1578 while (cur < total) {
1579 name_len = btrfs_inode_extref_name_len(eb, extref);
1580 index = btrfs_inode_extref_index(eb, extref);
1581 parent = btrfs_inode_extref_parent(eb, extref);
1582 if (name_len <= BTRFS_NAME_LEN) {
1586 len = BTRFS_NAME_LEN;
1587 error = REF_ERR_NAME_TOO_LONG;
1589 read_extent_buffer(eb, namebuf,
1590 (unsigned long)(extref + 1), len);
1591 add_inode_backref(inode_cache, key->objectid, parent,
1592 index, namebuf, len, 0, key->type, error);
1594 len = sizeof(*extref) + name_len;
1595 extref = (struct btrfs_inode_extref *)((char *)extref + len);
1602 static int count_csum_range(struct btrfs_root *root, u64 start,
1603 u64 len, u64 *found)
1605 struct btrfs_key key;
1606 struct btrfs_path path;
1607 struct extent_buffer *leaf;
1612 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
1614 btrfs_init_path(&path);
1616 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1618 key.type = BTRFS_EXTENT_CSUM_KEY;
1620 ret = btrfs_search_slot(NULL, root->fs_info->csum_root,
1624 if (ret > 0 && path.slots[0] > 0) {
1625 leaf = path.nodes[0];
1626 btrfs_item_key_to_cpu(leaf, &key, path.slots[0] - 1);
1627 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
1628 key.type == BTRFS_EXTENT_CSUM_KEY)
1633 leaf = path.nodes[0];
1634 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1635 ret = btrfs_next_leaf(root->fs_info->csum_root, &path);
1640 leaf = path.nodes[0];
1643 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1644 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
1645 key.type != BTRFS_EXTENT_CSUM_KEY)
1648 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1649 if (key.offset >= start + len)
1652 if (key.offset > start)
1655 size = btrfs_item_size_nr(leaf, path.slots[0]);
1656 csum_end = key.offset + (size / csum_size) * root->sectorsize;
1657 if (csum_end > start) {
1658 size = min(csum_end - start, len);
1667 btrfs_release_path(&path);
1673 static int process_file_extent(struct btrfs_root *root,
1674 struct extent_buffer *eb,
1675 int slot, struct btrfs_key *key,
1676 struct shared_node *active_node)
1678 struct inode_record *rec;
1679 struct btrfs_file_extent_item *fi;
1681 u64 disk_bytenr = 0;
1682 u64 extent_offset = 0;
1683 u64 mask = root->sectorsize - 1;
1687 rec = active_node->current;
1688 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
1689 rec->found_file_extent = 1;
1691 if (rec->extent_start == (u64)-1) {
1692 rec->extent_start = key->offset;
1693 rec->extent_end = key->offset;
1696 if (rec->extent_end > key->offset)
1697 rec->errors |= I_ERR_FILE_EXTENT_OVERLAP;
1698 else if (rec->extent_end < key->offset) {
1699 ret = add_file_extent_hole(&rec->holes, rec->extent_end,
1700 key->offset - rec->extent_end);
1705 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
1706 extent_type = btrfs_file_extent_type(eb, fi);
1708 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1709 num_bytes = btrfs_file_extent_inline_len(eb, slot, fi);
1711 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1712 rec->found_size += num_bytes;
1713 num_bytes = (num_bytes + mask) & ~mask;
1714 } else if (extent_type == BTRFS_FILE_EXTENT_REG ||
1715 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1716 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1717 disk_bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1718 extent_offset = btrfs_file_extent_offset(eb, fi);
1719 if (num_bytes == 0 || (num_bytes & mask))
1720 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1721 if (num_bytes + extent_offset >
1722 btrfs_file_extent_ram_bytes(eb, fi))
1723 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1724 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC &&
1725 (btrfs_file_extent_compression(eb, fi) ||
1726 btrfs_file_extent_encryption(eb, fi) ||
1727 btrfs_file_extent_other_encoding(eb, fi)))
1728 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1729 if (disk_bytenr > 0)
1730 rec->found_size += num_bytes;
1732 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1734 rec->extent_end = key->offset + num_bytes;
1737 * The data reloc tree will copy full extents into its inode and then
1738 * copy the corresponding csums. Because the extent it copied could be
1739 * a preallocated extent that hasn't been written to yet there may be no
1740 * csums to copy, ergo we won't have csums for our file extent. This is
1741 * ok so just don't bother checking csums if the inode belongs to the
1744 if (disk_bytenr > 0 &&
1745 btrfs_header_owner(eb) != BTRFS_DATA_RELOC_TREE_OBJECTID) {
1747 if (btrfs_file_extent_compression(eb, fi))
1748 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1750 disk_bytenr += extent_offset;
1752 ret = count_csum_range(root, disk_bytenr, num_bytes, &found);
1755 if (extent_type == BTRFS_FILE_EXTENT_REG) {
1757 rec->found_csum_item = 1;
1758 if (found < num_bytes)
1759 rec->some_csum_missing = 1;
1760 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1762 rec->errors |= I_ERR_ODD_CSUM_ITEM;
1768 static int process_one_leaf(struct btrfs_root *root, struct extent_buffer *eb,
1769 struct walk_control *wc)
1771 struct btrfs_key key;
1775 struct cache_tree *inode_cache;
1776 struct shared_node *active_node;
1778 if (wc->root_level == wc->active_node &&
1779 btrfs_root_refs(&root->root_item) == 0)
1782 active_node = wc->nodes[wc->active_node];
1783 inode_cache = &active_node->inode_cache;
1784 nritems = btrfs_header_nritems(eb);
1785 for (i = 0; i < nritems; i++) {
1786 btrfs_item_key_to_cpu(eb, &key, i);
1788 if (key.objectid == BTRFS_FREE_SPACE_OBJECTID)
1790 if (key.type == BTRFS_ORPHAN_ITEM_KEY)
1793 if (active_node->current == NULL ||
1794 active_node->current->ino < key.objectid) {
1795 if (active_node->current) {
1796 active_node->current->checked = 1;
1797 maybe_free_inode_rec(inode_cache,
1798 active_node->current);
1800 active_node->current = get_inode_rec(inode_cache,
1802 BUG_ON(IS_ERR(active_node->current));
1805 case BTRFS_DIR_ITEM_KEY:
1806 case BTRFS_DIR_INDEX_KEY:
1807 ret = process_dir_item(root, eb, i, &key, active_node);
1809 case BTRFS_INODE_REF_KEY:
1810 ret = process_inode_ref(eb, i, &key, active_node);
1812 case BTRFS_INODE_EXTREF_KEY:
1813 ret = process_inode_extref(eb, i, &key, active_node);
1815 case BTRFS_INODE_ITEM_KEY:
1816 ret = process_inode_item(eb, i, &key, active_node);
1818 case BTRFS_EXTENT_DATA_KEY:
1819 ret = process_file_extent(root, eb, i, &key,
1829 static void reada_walk_down(struct btrfs_root *root,
1830 struct extent_buffer *node, int slot)
1839 level = btrfs_header_level(node);
1843 nritems = btrfs_header_nritems(node);
1844 blocksize = root->nodesize;
1845 for (i = slot; i < nritems; i++) {
1846 bytenr = btrfs_node_blockptr(node, i);
1847 ptr_gen = btrfs_node_ptr_generation(node, i);
1848 readahead_tree_block(root, bytenr, blocksize, ptr_gen);
1853 * Check the child node/leaf by the following condition:
1854 * 1. the first item key of the node/leaf should be the same with the one
1856 * 2. block in parent node should match the child node/leaf.
1857 * 3. generation of parent node and child's header should be consistent.
1859 * Or the child node/leaf pointed by the key in parent is not valid.
1861 * We hope to check leaf owner too, but since subvol may share leaves,
1862 * which makes leaf owner check not so strong, key check should be
1863 * sufficient enough for that case.
1865 static int check_child_node(struct btrfs_root *root,
1866 struct extent_buffer *parent, int slot,
1867 struct extent_buffer *child)
1869 struct btrfs_key parent_key;
1870 struct btrfs_key child_key;
1873 btrfs_node_key_to_cpu(parent, &parent_key, slot);
1874 if (btrfs_header_level(child) == 0)
1875 btrfs_item_key_to_cpu(child, &child_key, 0);
1877 btrfs_node_key_to_cpu(child, &child_key, 0);
1879 if (memcmp(&parent_key, &child_key, sizeof(parent_key))) {
1882 "Wrong key of child node/leaf, wanted: (%llu, %u, %llu), have: (%llu, %u, %llu)\n",
1883 parent_key.objectid, parent_key.type, parent_key.offset,
1884 child_key.objectid, child_key.type, child_key.offset);
1886 if (btrfs_header_bytenr(child) != btrfs_node_blockptr(parent, slot)) {
1888 fprintf(stderr, "Wrong block of child node/leaf, wanted: %llu, have: %llu\n",
1889 btrfs_node_blockptr(parent, slot),
1890 btrfs_header_bytenr(child));
1892 if (btrfs_node_ptr_generation(parent, slot) !=
1893 btrfs_header_generation(child)) {
1895 fprintf(stderr, "Wrong generation of child node/leaf, wanted: %llu, have: %llu\n",
1896 btrfs_header_generation(child),
1897 btrfs_node_ptr_generation(parent, slot));
1903 u64 bytenr[BTRFS_MAX_LEVEL];
1904 u64 refs[BTRFS_MAX_LEVEL];
1907 static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
1908 struct walk_control *wc, int *level,
1909 struct node_refs *nrefs)
1911 enum btrfs_tree_block_status status;
1914 struct extent_buffer *next;
1915 struct extent_buffer *cur;
1920 WARN_ON(*level < 0);
1921 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1923 if (path->nodes[*level]->start == nrefs->bytenr[*level]) {
1924 refs = nrefs->refs[*level];
1927 ret = btrfs_lookup_extent_info(NULL, root,
1928 path->nodes[*level]->start,
1929 *level, 1, &refs, NULL);
1934 nrefs->bytenr[*level] = path->nodes[*level]->start;
1935 nrefs->refs[*level] = refs;
1939 ret = enter_shared_node(root, path->nodes[*level]->start,
1947 while (*level >= 0) {
1948 WARN_ON(*level < 0);
1949 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1950 cur = path->nodes[*level];
1952 if (btrfs_header_level(cur) != *level)
1955 if (path->slots[*level] >= btrfs_header_nritems(cur))
1958 ret = process_one_leaf(root, cur, wc);
1963 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1964 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
1965 blocksize = root->nodesize;
1967 if (bytenr == nrefs->bytenr[*level - 1]) {
1968 refs = nrefs->refs[*level - 1];
1970 ret = btrfs_lookup_extent_info(NULL, root, bytenr,
1971 *level - 1, 1, &refs, NULL);
1975 nrefs->bytenr[*level - 1] = bytenr;
1976 nrefs->refs[*level - 1] = refs;
1981 ret = enter_shared_node(root, bytenr, refs,
1984 path->slots[*level]++;
1989 next = btrfs_find_tree_block(root, bytenr, blocksize);
1990 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
1991 free_extent_buffer(next);
1992 reada_walk_down(root, cur, path->slots[*level]);
1993 next = read_tree_block(root, bytenr, blocksize,
1995 if (!extent_buffer_uptodate(next)) {
1996 struct btrfs_key node_key;
1998 btrfs_node_key_to_cpu(path->nodes[*level],
2000 path->slots[*level]);
2001 btrfs_add_corrupt_extent_record(root->fs_info,
2003 path->nodes[*level]->start,
2004 root->nodesize, *level);
2010 ret = check_child_node(root, cur, path->slots[*level], next);
2016 if (btrfs_is_leaf(next))
2017 status = btrfs_check_leaf(root, NULL, next);
2019 status = btrfs_check_node(root, NULL, next);
2020 if (status != BTRFS_TREE_BLOCK_CLEAN) {
2021 free_extent_buffer(next);
2026 *level = *level - 1;
2027 free_extent_buffer(path->nodes[*level]);
2028 path->nodes[*level] = next;
2029 path->slots[*level] = 0;
2032 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2036 static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
2037 struct walk_control *wc, int *level)
2040 struct extent_buffer *leaf;
2042 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2043 leaf = path->nodes[i];
2044 if (path->slots[i] + 1 < btrfs_header_nritems(leaf)) {
2049 free_extent_buffer(path->nodes[*level]);
2050 path->nodes[*level] = NULL;
2051 BUG_ON(*level > wc->active_node);
2052 if (*level == wc->active_node)
2053 leave_shared_node(root, wc, *level);
2060 static int check_root_dir(struct inode_record *rec)
2062 struct inode_backref *backref;
2065 if (!rec->found_inode_item || rec->errors)
2067 if (rec->nlink != 1 || rec->found_link != 0)
2069 if (list_empty(&rec->backrefs))
2071 backref = to_inode_backref(rec->backrefs.next);
2072 if (!backref->found_inode_ref)
2074 if (backref->index != 0 || backref->namelen != 2 ||
2075 memcmp(backref->name, "..", 2))
2077 if (backref->found_dir_index || backref->found_dir_item)
2084 static int repair_inode_isize(struct btrfs_trans_handle *trans,
2085 struct btrfs_root *root, struct btrfs_path *path,
2086 struct inode_record *rec)
2088 struct btrfs_inode_item *ei;
2089 struct btrfs_key key;
2092 key.objectid = rec->ino;
2093 key.type = BTRFS_INODE_ITEM_KEY;
2094 key.offset = (u64)-1;
2096 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2100 if (!path->slots[0]) {
2107 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2108 if (key.objectid != rec->ino) {
2113 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2114 struct btrfs_inode_item);
2115 btrfs_set_inode_size(path->nodes[0], ei, rec->found_size);
2116 btrfs_mark_buffer_dirty(path->nodes[0]);
2117 rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
2118 printf("reset isize for dir %Lu root %Lu\n", rec->ino,
2119 root->root_key.objectid);
2121 btrfs_release_path(path);
2125 static int repair_inode_orphan_item(struct btrfs_trans_handle *trans,
2126 struct btrfs_root *root,
2127 struct btrfs_path *path,
2128 struct inode_record *rec)
2132 ret = btrfs_add_orphan_item(trans, root, path, rec->ino);
2133 btrfs_release_path(path);
2135 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
2139 static int repair_inode_nbytes(struct btrfs_trans_handle *trans,
2140 struct btrfs_root *root,
2141 struct btrfs_path *path,
2142 struct inode_record *rec)
2144 struct btrfs_inode_item *ei;
2145 struct btrfs_key key;
2148 key.objectid = rec->ino;
2149 key.type = BTRFS_INODE_ITEM_KEY;
2152 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2159 /* Since ret == 0, no need to check anything */
2160 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2161 struct btrfs_inode_item);
2162 btrfs_set_inode_nbytes(path->nodes[0], ei, rec->found_size);
2163 btrfs_mark_buffer_dirty(path->nodes[0]);
2164 rec->errors &= ~I_ERR_FILE_NBYTES_WRONG;
2165 printf("reset nbytes for ino %llu root %llu\n",
2166 rec->ino, root->root_key.objectid);
2168 btrfs_release_path(path);
2172 static int add_missing_dir_index(struct btrfs_root *root,
2173 struct cache_tree *inode_cache,
2174 struct inode_record *rec,
2175 struct inode_backref *backref)
2177 struct btrfs_path *path;
2178 struct btrfs_trans_handle *trans;
2179 struct btrfs_dir_item *dir_item;
2180 struct extent_buffer *leaf;
2181 struct btrfs_key key;
2182 struct btrfs_disk_key disk_key;
2183 struct inode_record *dir_rec;
2184 unsigned long name_ptr;
2185 u32 data_size = sizeof(*dir_item) + backref->namelen;
2188 path = btrfs_alloc_path();
2192 trans = btrfs_start_transaction(root, 1);
2193 if (IS_ERR(trans)) {
2194 btrfs_free_path(path);
2195 return PTR_ERR(trans);
2198 fprintf(stderr, "repairing missing dir index item for inode %llu\n",
2199 (unsigned long long)rec->ino);
2200 key.objectid = backref->dir;
2201 key.type = BTRFS_DIR_INDEX_KEY;
2202 key.offset = backref->index;
2204 ret = btrfs_insert_empty_item(trans, root, path, &key, data_size);
2207 leaf = path->nodes[0];
2208 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
2210 disk_key.objectid = cpu_to_le64(rec->ino);
2211 disk_key.type = BTRFS_INODE_ITEM_KEY;
2212 disk_key.offset = 0;
2214 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
2215 btrfs_set_dir_type(leaf, dir_item, imode_to_type(rec->imode));
2216 btrfs_set_dir_data_len(leaf, dir_item, 0);
2217 btrfs_set_dir_name_len(leaf, dir_item, backref->namelen);
2218 name_ptr = (unsigned long)(dir_item + 1);
2219 write_extent_buffer(leaf, backref->name, name_ptr, backref->namelen);
2220 btrfs_mark_buffer_dirty(leaf);
2221 btrfs_free_path(path);
2222 btrfs_commit_transaction(trans, root);
2224 backref->found_dir_index = 1;
2225 dir_rec = get_inode_rec(inode_cache, backref->dir, 0);
2226 BUG_ON(IS_ERR(dir_rec));
2229 dir_rec->found_size += backref->namelen;
2230 if (dir_rec->found_size == dir_rec->isize &&
2231 (dir_rec->errors & I_ERR_DIR_ISIZE_WRONG))
2232 dir_rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
2233 if (dir_rec->found_size != dir_rec->isize)
2234 dir_rec->errors |= I_ERR_DIR_ISIZE_WRONG;
2239 static int delete_dir_index(struct btrfs_root *root,
2240 struct cache_tree *inode_cache,
2241 struct inode_record *rec,
2242 struct inode_backref *backref)
2244 struct btrfs_trans_handle *trans;
2245 struct btrfs_dir_item *di;
2246 struct btrfs_path *path;
2249 path = btrfs_alloc_path();
2253 trans = btrfs_start_transaction(root, 1);
2254 if (IS_ERR(trans)) {
2255 btrfs_free_path(path);
2256 return PTR_ERR(trans);
2260 fprintf(stderr, "Deleting bad dir index [%llu,%u,%llu] root %llu\n",
2261 (unsigned long long)backref->dir,
2262 BTRFS_DIR_INDEX_KEY, (unsigned long long)backref->index,
2263 (unsigned long long)root->objectid);
2265 di = btrfs_lookup_dir_index(trans, root, path, backref->dir,
2266 backref->name, backref->namelen,
2267 backref->index, -1);
2270 btrfs_free_path(path);
2271 btrfs_commit_transaction(trans, root);
2278 ret = btrfs_del_item(trans, root, path);
2280 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2282 btrfs_free_path(path);
2283 btrfs_commit_transaction(trans, root);
2287 static int create_inode_item(struct btrfs_root *root,
2288 struct inode_record *rec,
2289 struct inode_backref *backref, int root_dir)
2291 struct btrfs_trans_handle *trans;
2292 struct btrfs_inode_item inode_item;
2293 time_t now = time(NULL);
2296 trans = btrfs_start_transaction(root, 1);
2297 if (IS_ERR(trans)) {
2298 ret = PTR_ERR(trans);
2302 fprintf(stderr, "root %llu inode %llu recreating inode item, this may "
2303 "be incomplete, please check permissions and content after "
2304 "the fsck completes.\n", (unsigned long long)root->objectid,
2305 (unsigned long long)rec->ino);
2307 memset(&inode_item, 0, sizeof(inode_item));
2308 btrfs_set_stack_inode_generation(&inode_item, trans->transid);
2310 btrfs_set_stack_inode_nlink(&inode_item, 1);
2312 btrfs_set_stack_inode_nlink(&inode_item, rec->found_link);
2313 btrfs_set_stack_inode_nbytes(&inode_item, rec->found_size);
2314 if (rec->found_dir_item) {
2315 if (rec->found_file_extent)
2316 fprintf(stderr, "root %llu inode %llu has both a dir "
2317 "item and extents, unsure if it is a dir or a "
2318 "regular file so setting it as a directory\n",
2319 (unsigned long long)root->objectid,
2320 (unsigned long long)rec->ino);
2321 btrfs_set_stack_inode_mode(&inode_item, S_IFDIR | 0755);
2322 btrfs_set_stack_inode_size(&inode_item, rec->found_size);
2323 } else if (!rec->found_dir_item) {
2324 btrfs_set_stack_inode_size(&inode_item, rec->extent_end);
2325 btrfs_set_stack_inode_mode(&inode_item, S_IFREG | 0755);
2327 btrfs_set_stack_timespec_sec(&inode_item.atime, now);
2328 btrfs_set_stack_timespec_nsec(&inode_item.atime, 0);
2329 btrfs_set_stack_timespec_sec(&inode_item.ctime, now);
2330 btrfs_set_stack_timespec_nsec(&inode_item.ctime, 0);
2331 btrfs_set_stack_timespec_sec(&inode_item.mtime, now);
2332 btrfs_set_stack_timespec_nsec(&inode_item.mtime, 0);
2333 btrfs_set_stack_timespec_sec(&inode_item.otime, 0);
2334 btrfs_set_stack_timespec_nsec(&inode_item.otime, 0);
2336 ret = btrfs_insert_inode(trans, root, rec->ino, &inode_item);
2338 btrfs_commit_transaction(trans, root);
2342 static int repair_inode_backrefs(struct btrfs_root *root,
2343 struct inode_record *rec,
2344 struct cache_tree *inode_cache,
2347 struct inode_backref *tmp, *backref;
2348 u64 root_dirid = btrfs_root_dirid(&root->root_item);
2352 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
2353 if (!delete && rec->ino == root_dirid) {
2354 if (!rec->found_inode_item) {
2355 ret = create_inode_item(root, rec, backref, 1);
2362 /* Index 0 for root dir's are special, don't mess with it */
2363 if (rec->ino == root_dirid && backref->index == 0)
2367 ((backref->found_dir_index && !backref->found_inode_ref) ||
2368 (backref->found_dir_index && backref->found_inode_ref &&
2369 (backref->errors & REF_ERR_INDEX_UNMATCH)))) {
2370 ret = delete_dir_index(root, inode_cache, rec, backref);
2374 list_del(&backref->list);
2378 if (!delete && !backref->found_dir_index &&
2379 backref->found_dir_item && backref->found_inode_ref) {
2380 ret = add_missing_dir_index(root, inode_cache, rec,
2385 if (backref->found_dir_item &&
2386 backref->found_dir_index &&
2387 backref->found_dir_index) {
2388 if (!backref->errors &&
2389 backref->found_inode_ref) {
2390 list_del(&backref->list);
2396 if (!delete && (!backref->found_dir_index &&
2397 !backref->found_dir_item &&
2398 backref->found_inode_ref)) {
2399 struct btrfs_trans_handle *trans;
2400 struct btrfs_key location;
2402 ret = check_dir_conflict(root, backref->name,
2408 * let nlink fixing routine to handle it,
2409 * which can do it better.
2414 location.objectid = rec->ino;
2415 location.type = BTRFS_INODE_ITEM_KEY;
2416 location.offset = 0;
2418 trans = btrfs_start_transaction(root, 1);
2419 if (IS_ERR(trans)) {
2420 ret = PTR_ERR(trans);
2423 fprintf(stderr, "adding missing dir index/item pair "
2425 (unsigned long long)rec->ino);
2426 ret = btrfs_insert_dir_item(trans, root, backref->name,
2428 backref->dir, &location,
2429 imode_to_type(rec->imode),
2432 btrfs_commit_transaction(trans, root);
2436 if (!delete && (backref->found_inode_ref &&
2437 backref->found_dir_index &&
2438 backref->found_dir_item &&
2439 !(backref->errors & REF_ERR_INDEX_UNMATCH) &&
2440 !rec->found_inode_item)) {
2441 ret = create_inode_item(root, rec, backref, 0);
2448 return ret ? ret : repaired;
2452 * To determine the file type for nlink/inode_item repair
2454 * Return 0 if file type is found and BTRFS_FT_* is stored into type.
2455 * Return -ENOENT if file type is not found.
2457 static int find_file_type(struct inode_record *rec, u8 *type)
2459 struct inode_backref *backref;
2461 /* For inode item recovered case */
2462 if (rec->found_inode_item) {
2463 *type = imode_to_type(rec->imode);
2467 list_for_each_entry(backref, &rec->backrefs, list) {
2468 if (backref->found_dir_index || backref->found_dir_item) {
2469 *type = backref->filetype;
2477 * To determine the file name for nlink repair
2479 * Return 0 if file name is found, set name and namelen.
2480 * Return -ENOENT if file name is not found.
2482 static int find_file_name(struct inode_record *rec,
2483 char *name, int *namelen)
2485 struct inode_backref *backref;
2487 list_for_each_entry(backref, &rec->backrefs, list) {
2488 if (backref->found_dir_index || backref->found_dir_item ||
2489 backref->found_inode_ref) {
2490 memcpy(name, backref->name, backref->namelen);
2491 *namelen = backref->namelen;
2498 /* Reset the nlink of the inode to the correct one */
2499 static int reset_nlink(struct btrfs_trans_handle *trans,
2500 struct btrfs_root *root,
2501 struct btrfs_path *path,
2502 struct inode_record *rec)
2504 struct inode_backref *backref;
2505 struct inode_backref *tmp;
2506 struct btrfs_key key;
2507 struct btrfs_inode_item *inode_item;
2510 /* We don't believe this either, reset it and iterate backref */
2511 rec->found_link = 0;
2513 /* Remove all backref including the valid ones */
2514 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
2515 ret = btrfs_unlink(trans, root, rec->ino, backref->dir,
2516 backref->index, backref->name,
2517 backref->namelen, 0);
2521 /* remove invalid backref, so it won't be added back */
2522 if (!(backref->found_dir_index &&
2523 backref->found_dir_item &&
2524 backref->found_inode_ref)) {
2525 list_del(&backref->list);
2532 /* Set nlink to 0 */
2533 key.objectid = rec->ino;
2534 key.type = BTRFS_INODE_ITEM_KEY;
2536 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2543 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2544 struct btrfs_inode_item);
2545 btrfs_set_inode_nlink(path->nodes[0], inode_item, 0);
2546 btrfs_mark_buffer_dirty(path->nodes[0]);
2547 btrfs_release_path(path);
2550 * Add back valid inode_ref/dir_item/dir_index,
2551 * add_link() will handle the nlink inc, so new nlink must be correct
2553 list_for_each_entry(backref, &rec->backrefs, list) {
2554 ret = btrfs_add_link(trans, root, rec->ino, backref->dir,
2555 backref->name, backref->namelen,
2556 backref->filetype, &backref->index, 1);
2561 btrfs_release_path(path);
2565 static int repair_inode_nlinks(struct btrfs_trans_handle *trans,
2566 struct btrfs_root *root,
2567 struct btrfs_path *path,
2568 struct inode_record *rec)
2570 char *dir_name = "lost+found";
2571 char namebuf[BTRFS_NAME_LEN] = {0};
2576 int name_recovered = 0;
2577 int type_recovered = 0;
2581 * Get file name and type first before these invalid inode ref
2582 * are deleted by remove_all_invalid_backref()
2584 name_recovered = !find_file_name(rec, namebuf, &namelen);
2585 type_recovered = !find_file_type(rec, &type);
2587 if (!name_recovered) {
2588 printf("Can't get file name for inode %llu, using '%llu' as fallback\n",
2589 rec->ino, rec->ino);
2590 namelen = count_digits(rec->ino);
2591 sprintf(namebuf, "%llu", rec->ino);
2594 if (!type_recovered) {
2595 printf("Can't get file type for inode %llu, using FILE as fallback\n",
2597 type = BTRFS_FT_REG_FILE;
2601 ret = reset_nlink(trans, root, path, rec);
2604 "Failed to reset nlink for inode %llu: %s\n",
2605 rec->ino, strerror(-ret));
2609 if (rec->found_link == 0) {
2610 lost_found_ino = root->highest_inode;
2611 if (lost_found_ino >= BTRFS_LAST_FREE_OBJECTID) {
2616 ret = btrfs_mkdir(trans, root, dir_name, strlen(dir_name),
2617 BTRFS_FIRST_FREE_OBJECTID, &lost_found_ino,
2620 fprintf(stderr, "Failed to create '%s' dir: %s\n",
2621 dir_name, strerror(-ret));
2624 ret = btrfs_add_link(trans, root, rec->ino, lost_found_ino,
2625 namebuf, namelen, type, NULL, 1);
2627 * Add ".INO" suffix several times to handle case where
2628 * "FILENAME.INO" is already taken by another file.
2630 while (ret == -EEXIST) {
2632 * Conflicting file name, add ".INO" as suffix * +1 for '.'
2634 if (namelen + count_digits(rec->ino) + 1 >
2639 snprintf(namebuf + namelen, BTRFS_NAME_LEN - namelen,
2641 namelen += count_digits(rec->ino) + 1;
2642 ret = btrfs_add_link(trans, root, rec->ino,
2643 lost_found_ino, namebuf,
2644 namelen, type, NULL, 1);
2648 "Failed to link the inode %llu to %s dir: %s\n",
2649 rec->ino, dir_name, strerror(-ret));
2653 * Just increase the found_link, don't actually add the
2654 * backref. This will make things easier and this inode
2655 * record will be freed after the repair is done.
2656 * So fsck will not report problem about this inode.
2659 printf("Moving file '%.*s' to '%s' dir since it has no valid backref\n",
2660 namelen, namebuf, dir_name);
2662 printf("Fixed the nlink of inode %llu\n", rec->ino);
2665 * Clear the flag anyway, or we will loop forever for the same inode
2666 * as it will not be removed from the bad inode list and the dead loop
2669 rec->errors &= ~I_ERR_LINK_COUNT_WRONG;
2670 btrfs_release_path(path);
2675 * Check if there is any normal(reg or prealloc) file extent for given
2677 * This is used to determine the file type when neither its dir_index/item or
2678 * inode_item exists.
2680 * This will *NOT* report error, if any error happens, just consider it does
2681 * not have any normal file extent.
2683 static int find_normal_file_extent(struct btrfs_root *root, u64 ino)
2685 struct btrfs_path *path;
2686 struct btrfs_key key;
2687 struct btrfs_key found_key;
2688 struct btrfs_file_extent_item *fi;
2692 path = btrfs_alloc_path();
2696 key.type = BTRFS_EXTENT_DATA_KEY;
2699 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2704 if (ret && path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2705 ret = btrfs_next_leaf(root, path);
2712 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2714 if (found_key.objectid != ino ||
2715 found_key.type != BTRFS_EXTENT_DATA_KEY)
2717 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
2718 struct btrfs_file_extent_item);
2719 type = btrfs_file_extent_type(path->nodes[0], fi);
2720 if (type != BTRFS_FILE_EXTENT_INLINE) {
2726 btrfs_free_path(path);
2730 static u32 btrfs_type_to_imode(u8 type)
2732 static u32 imode_by_btrfs_type[] = {
2733 [BTRFS_FT_REG_FILE] = S_IFREG,
2734 [BTRFS_FT_DIR] = S_IFDIR,
2735 [BTRFS_FT_CHRDEV] = S_IFCHR,
2736 [BTRFS_FT_BLKDEV] = S_IFBLK,
2737 [BTRFS_FT_FIFO] = S_IFIFO,
2738 [BTRFS_FT_SOCK] = S_IFSOCK,
2739 [BTRFS_FT_SYMLINK] = S_IFLNK,
2742 return imode_by_btrfs_type[(type)];
2745 static int repair_inode_no_item(struct btrfs_trans_handle *trans,
2746 struct btrfs_root *root,
2747 struct btrfs_path *path,
2748 struct inode_record *rec)
2752 int type_recovered = 0;
2755 printf("Trying to rebuild inode:%llu\n", rec->ino);
2757 type_recovered = !find_file_type(rec, &filetype);
2760 * Try to determine inode type if type not found.
2762 * For found regular file extent, it must be FILE.
2763 * For found dir_item/index, it must be DIR.
2765 * For undetermined one, use FILE as fallback.
2768 * 1. If found backref(inode_index/item is already handled) to it,
2770 * Need new inode-inode ref structure to allow search for that.
2772 if (!type_recovered) {
2773 if (rec->found_file_extent &&
2774 find_normal_file_extent(root, rec->ino)) {
2776 filetype = BTRFS_FT_REG_FILE;
2777 } else if (rec->found_dir_item) {
2779 filetype = BTRFS_FT_DIR;
2780 } else if (!list_empty(&rec->orphan_extents)) {
2782 filetype = BTRFS_FT_REG_FILE;
2784 printf("Can't determine the filetype for inode %llu, assume it is a normal file\n",
2787 filetype = BTRFS_FT_REG_FILE;
2791 ret = btrfs_new_inode(trans, root, rec->ino,
2792 mode | btrfs_type_to_imode(filetype));
2797 * Here inode rebuild is done, we only rebuild the inode item,
2798 * don't repair the nlink(like move to lost+found).
2799 * That is the job of nlink repair.
2801 * We just fill the record and return
2803 rec->found_dir_item = 1;
2804 rec->imode = mode | btrfs_type_to_imode(filetype);
2806 rec->errors &= ~I_ERR_NO_INODE_ITEM;
2807 /* Ensure the inode_nlinks repair function will be called */
2808 rec->errors |= I_ERR_LINK_COUNT_WRONG;
2813 static int repair_inode_orphan_extent(struct btrfs_trans_handle *trans,
2814 struct btrfs_root *root,
2815 struct btrfs_path *path,
2816 struct inode_record *rec)
2818 struct orphan_data_extent *orphan;
2819 struct orphan_data_extent *tmp;
2822 list_for_each_entry_safe(orphan, tmp, &rec->orphan_extents, list) {
2824 * Check for conflicting file extents
2826 * Here we don't know whether the extents is compressed or not,
2827 * so we can only assume it not compressed nor data offset,
2828 * and use its disk_len as extent length.
2830 ret = btrfs_get_extent(NULL, root, path, orphan->objectid,
2831 orphan->offset, orphan->disk_len, 0);
2832 btrfs_release_path(path);
2837 "orphan extent (%llu, %llu) conflicts, delete the orphan\n",
2838 orphan->disk_bytenr, orphan->disk_len);
2839 ret = btrfs_free_extent(trans,
2840 root->fs_info->extent_root,
2841 orphan->disk_bytenr, orphan->disk_len,
2842 0, root->objectid, orphan->objectid,
2847 ret = btrfs_insert_file_extent(trans, root, orphan->objectid,
2848 orphan->offset, orphan->disk_bytenr,
2849 orphan->disk_len, orphan->disk_len);
2853 /* Update file size info */
2854 rec->found_size += orphan->disk_len;
2855 if (rec->found_size == rec->nbytes)
2856 rec->errors &= ~I_ERR_FILE_NBYTES_WRONG;
2858 /* Update the file extent hole info too */
2859 ret = del_file_extent_hole(&rec->holes, orphan->offset,
2863 if (RB_EMPTY_ROOT(&rec->holes))
2864 rec->errors &= ~I_ERR_FILE_EXTENT_DISCOUNT;
2866 list_del(&orphan->list);
2869 rec->errors &= ~I_ERR_FILE_EXTENT_ORPHAN;
2874 static int repair_inode_discount_extent(struct btrfs_trans_handle *trans,
2875 struct btrfs_root *root,
2876 struct btrfs_path *path,
2877 struct inode_record *rec)
2879 struct rb_node *node;
2880 struct file_extent_hole *hole;
2884 node = rb_first(&rec->holes);
2888 hole = rb_entry(node, struct file_extent_hole, node);
2889 ret = btrfs_punch_hole(trans, root, rec->ino,
2890 hole->start, hole->len);
2893 ret = del_file_extent_hole(&rec->holes, hole->start,
2897 if (RB_EMPTY_ROOT(&rec->holes))
2898 rec->errors &= ~I_ERR_FILE_EXTENT_DISCOUNT;
2899 node = rb_first(&rec->holes);
2901 /* special case for a file losing all its file extent */
2903 ret = btrfs_punch_hole(trans, root, rec->ino, 0,
2904 round_up(rec->isize, root->sectorsize));
2908 printf("Fixed discount file extents for inode: %llu in root: %llu\n",
2909 rec->ino, root->objectid);
2914 static int try_repair_inode(struct btrfs_root *root, struct inode_record *rec)
2916 struct btrfs_trans_handle *trans;
2917 struct btrfs_path *path;
2920 if (!(rec->errors & (I_ERR_DIR_ISIZE_WRONG |
2921 I_ERR_NO_ORPHAN_ITEM |
2922 I_ERR_LINK_COUNT_WRONG |
2923 I_ERR_NO_INODE_ITEM |
2924 I_ERR_FILE_EXTENT_ORPHAN |
2925 I_ERR_FILE_EXTENT_DISCOUNT|
2926 I_ERR_FILE_NBYTES_WRONG)))
2929 path = btrfs_alloc_path();
2934 * For nlink repair, it may create a dir and add link, so
2935 * 2 for parent(256)'s dir_index and dir_item
2936 * 2 for lost+found dir's inode_item and inode_ref
2937 * 1 for the new inode_ref of the file
2938 * 2 for lost+found dir's dir_index and dir_item for the file
2940 trans = btrfs_start_transaction(root, 7);
2941 if (IS_ERR(trans)) {
2942 btrfs_free_path(path);
2943 return PTR_ERR(trans);
2946 if (rec->errors & I_ERR_NO_INODE_ITEM)
2947 ret = repair_inode_no_item(trans, root, path, rec);
2948 if (!ret && rec->errors & I_ERR_FILE_EXTENT_ORPHAN)
2949 ret = repair_inode_orphan_extent(trans, root, path, rec);
2950 if (!ret && rec->errors & I_ERR_FILE_EXTENT_DISCOUNT)
2951 ret = repair_inode_discount_extent(trans, root, path, rec);
2952 if (!ret && rec->errors & I_ERR_DIR_ISIZE_WRONG)
2953 ret = repair_inode_isize(trans, root, path, rec);
2954 if (!ret && rec->errors & I_ERR_NO_ORPHAN_ITEM)
2955 ret = repair_inode_orphan_item(trans, root, path, rec);
2956 if (!ret && rec->errors & I_ERR_LINK_COUNT_WRONG)
2957 ret = repair_inode_nlinks(trans, root, path, rec);
2958 if (!ret && rec->errors & I_ERR_FILE_NBYTES_WRONG)
2959 ret = repair_inode_nbytes(trans, root, path, rec);
2960 btrfs_commit_transaction(trans, root);
2961 btrfs_free_path(path);
2965 static int check_inode_recs(struct btrfs_root *root,
2966 struct cache_tree *inode_cache)
2968 struct cache_extent *cache;
2969 struct ptr_node *node;
2970 struct inode_record *rec;
2971 struct inode_backref *backref;
2976 u64 root_dirid = btrfs_root_dirid(&root->root_item);
2978 if (btrfs_root_refs(&root->root_item) == 0) {
2979 if (!cache_tree_empty(inode_cache))
2980 fprintf(stderr, "warning line %d\n", __LINE__);
2985 * We need to record the highest inode number for later 'lost+found'
2987 * We must select an ino not used/referred by any existing inode, or
2988 * 'lost+found' ino may be a missing ino in a corrupted leaf,
2989 * this may cause 'lost+found' dir has wrong nlinks.
2991 cache = last_cache_extent(inode_cache);
2993 node = container_of(cache, struct ptr_node, cache);
2995 if (rec->ino > root->highest_inode)
2996 root->highest_inode = rec->ino;
3000 * We need to repair backrefs first because we could change some of the
3001 * errors in the inode recs.
3003 * We also need to go through and delete invalid backrefs first and then
3004 * add the correct ones second. We do this because we may get EEXIST
3005 * when adding back the correct index because we hadn't yet deleted the
3008 * For example, if we were missing a dir index then the directories
3009 * isize would be wrong, so if we fixed the isize to what we thought it
3010 * would be and then fixed the backref we'd still have a invalid fs, so
3011 * we need to add back the dir index and then check to see if the isize
3016 if (stage == 3 && !err)
3019 cache = search_cache_extent(inode_cache, 0);
3020 while (repair && cache) {
3021 node = container_of(cache, struct ptr_node, cache);
3023 cache = next_cache_extent(cache);
3025 /* Need to free everything up and rescan */
3027 remove_cache_extent(inode_cache, &node->cache);
3029 free_inode_rec(rec);
3033 if (list_empty(&rec->backrefs))
3036 ret = repair_inode_backrefs(root, rec, inode_cache,
3050 rec = get_inode_rec(inode_cache, root_dirid, 0);
3051 BUG_ON(IS_ERR(rec));
3053 ret = check_root_dir(rec);
3055 fprintf(stderr, "root %llu root dir %llu error\n",
3056 (unsigned long long)root->root_key.objectid,
3057 (unsigned long long)root_dirid);
3058 print_inode_error(root, rec);
3063 struct btrfs_trans_handle *trans;
3065 trans = btrfs_start_transaction(root, 1);
3066 if (IS_ERR(trans)) {
3067 err = PTR_ERR(trans);
3072 "root %llu missing its root dir, recreating\n",
3073 (unsigned long long)root->objectid);
3075 ret = btrfs_make_root_dir(trans, root, root_dirid);
3078 btrfs_commit_transaction(trans, root);
3082 fprintf(stderr, "root %llu root dir %llu not found\n",
3083 (unsigned long long)root->root_key.objectid,
3084 (unsigned long long)root_dirid);
3088 cache = search_cache_extent(inode_cache, 0);
3091 node = container_of(cache, struct ptr_node, cache);
3093 remove_cache_extent(inode_cache, &node->cache);
3095 if (rec->ino == root_dirid ||
3096 rec->ino == BTRFS_ORPHAN_OBJECTID) {
3097 free_inode_rec(rec);
3101 if (rec->errors & I_ERR_NO_ORPHAN_ITEM) {
3102 ret = check_orphan_item(root, rec->ino);
3104 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
3105 if (can_free_inode_rec(rec)) {
3106 free_inode_rec(rec);
3111 if (!rec->found_inode_item)
3112 rec->errors |= I_ERR_NO_INODE_ITEM;
3113 if (rec->found_link != rec->nlink)
3114 rec->errors |= I_ERR_LINK_COUNT_WRONG;
3116 ret = try_repair_inode(root, rec);
3117 if (ret == 0 && can_free_inode_rec(rec)) {
3118 free_inode_rec(rec);
3124 if (!(repair && ret == 0))
3126 print_inode_error(root, rec);
3127 list_for_each_entry(backref, &rec->backrefs, list) {
3128 if (!backref->found_dir_item)
3129 backref->errors |= REF_ERR_NO_DIR_ITEM;
3130 if (!backref->found_dir_index)
3131 backref->errors |= REF_ERR_NO_DIR_INDEX;
3132 if (!backref->found_inode_ref)
3133 backref->errors |= REF_ERR_NO_INODE_REF;
3134 fprintf(stderr, "\tunresolved ref dir %llu index %llu"
3135 " namelen %u name %s filetype %d errors %x",
3136 (unsigned long long)backref->dir,
3137 (unsigned long long)backref->index,
3138 backref->namelen, backref->name,
3139 backref->filetype, backref->errors);
3140 print_ref_error(backref->errors);
3142 free_inode_rec(rec);
3144 return (error > 0) ? -1 : 0;
3147 static struct root_record *get_root_rec(struct cache_tree *root_cache,
3150 struct cache_extent *cache;
3151 struct root_record *rec = NULL;
3154 cache = lookup_cache_extent(root_cache, objectid, 1);
3156 rec = container_of(cache, struct root_record, cache);
3158 rec = calloc(1, sizeof(*rec));
3160 return ERR_PTR(-ENOMEM);
3161 rec->objectid = objectid;
3162 INIT_LIST_HEAD(&rec->backrefs);
3163 rec->cache.start = objectid;
3164 rec->cache.size = 1;
3166 ret = insert_cache_extent(root_cache, &rec->cache);
3168 return ERR_PTR(-EEXIST);
3173 static struct root_backref *get_root_backref(struct root_record *rec,
3174 u64 ref_root, u64 dir, u64 index,
3175 const char *name, int namelen)
3177 struct root_backref *backref;
3179 list_for_each_entry(backref, &rec->backrefs, list) {
3180 if (backref->ref_root != ref_root || backref->dir != dir ||
3181 backref->namelen != namelen)
3183 if (memcmp(name, backref->name, namelen))
3188 backref = calloc(1, sizeof(*backref) + namelen + 1);
3191 backref->ref_root = ref_root;
3193 backref->index = index;
3194 backref->namelen = namelen;
3195 memcpy(backref->name, name, namelen);
3196 backref->name[namelen] = '\0';
3197 list_add_tail(&backref->list, &rec->backrefs);
3201 static void free_root_record(struct cache_extent *cache)
3203 struct root_record *rec;
3204 struct root_backref *backref;
3206 rec = container_of(cache, struct root_record, cache);
3207 while (!list_empty(&rec->backrefs)) {
3208 backref = to_root_backref(rec->backrefs.next);
3209 list_del(&backref->list);
3216 FREE_EXTENT_CACHE_BASED_TREE(root_recs, free_root_record);
3218 static int add_root_backref(struct cache_tree *root_cache,
3219 u64 root_id, u64 ref_root, u64 dir, u64 index,
3220 const char *name, int namelen,
3221 int item_type, int errors)
3223 struct root_record *rec;
3224 struct root_backref *backref;
3226 rec = get_root_rec(root_cache, root_id);
3227 BUG_ON(IS_ERR(rec));
3228 backref = get_root_backref(rec, ref_root, dir, index, name, namelen);
3231 backref->errors |= errors;
3233 if (item_type != BTRFS_DIR_ITEM_KEY) {
3234 if (backref->found_dir_index || backref->found_back_ref ||
3235 backref->found_forward_ref) {
3236 if (backref->index != index)
3237 backref->errors |= REF_ERR_INDEX_UNMATCH;
3239 backref->index = index;
3243 if (item_type == BTRFS_DIR_ITEM_KEY) {
3244 if (backref->found_forward_ref)
3246 backref->found_dir_item = 1;
3247 } else if (item_type == BTRFS_DIR_INDEX_KEY) {
3248 backref->found_dir_index = 1;
3249 } else if (item_type == BTRFS_ROOT_REF_KEY) {
3250 if (backref->found_forward_ref)
3251 backref->errors |= REF_ERR_DUP_ROOT_REF;
3252 else if (backref->found_dir_item)
3254 backref->found_forward_ref = 1;
3255 } else if (item_type == BTRFS_ROOT_BACKREF_KEY) {
3256 if (backref->found_back_ref)
3257 backref->errors |= REF_ERR_DUP_ROOT_BACKREF;
3258 backref->found_back_ref = 1;
3263 if (backref->found_forward_ref && backref->found_dir_item)
3264 backref->reachable = 1;
3268 static int merge_root_recs(struct btrfs_root *root,
3269 struct cache_tree *src_cache,
3270 struct cache_tree *dst_cache)
3272 struct cache_extent *cache;
3273 struct ptr_node *node;
3274 struct inode_record *rec;
3275 struct inode_backref *backref;
3278 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
3279 free_inode_recs_tree(src_cache);
3284 cache = search_cache_extent(src_cache, 0);
3287 node = container_of(cache, struct ptr_node, cache);
3289 remove_cache_extent(src_cache, &node->cache);
3292 ret = is_child_root(root, root->objectid, rec->ino);
3298 list_for_each_entry(backref, &rec->backrefs, list) {
3299 BUG_ON(backref->found_inode_ref);
3300 if (backref->found_dir_item)
3301 add_root_backref(dst_cache, rec->ino,
3302 root->root_key.objectid, backref->dir,
3303 backref->index, backref->name,
3304 backref->namelen, BTRFS_DIR_ITEM_KEY,
3306 if (backref->found_dir_index)
3307 add_root_backref(dst_cache, rec->ino,
3308 root->root_key.objectid, backref->dir,
3309 backref->index, backref->name,
3310 backref->namelen, BTRFS_DIR_INDEX_KEY,
3314 free_inode_rec(rec);
3321 static int check_root_refs(struct btrfs_root *root,
3322 struct cache_tree *root_cache)
3324 struct root_record *rec;
3325 struct root_record *ref_root;
3326 struct root_backref *backref;
3327 struct cache_extent *cache;
3333 rec = get_root_rec(root_cache, BTRFS_FS_TREE_OBJECTID);
3334 BUG_ON(IS_ERR(rec));
3337 /* fixme: this can not detect circular references */
3340 cache = search_cache_extent(root_cache, 0);
3344 rec = container_of(cache, struct root_record, cache);
3345 cache = next_cache_extent(cache);
3347 if (rec->found_ref == 0)
3350 list_for_each_entry(backref, &rec->backrefs, list) {
3351 if (!backref->reachable)
3354 ref_root = get_root_rec(root_cache,
3356 BUG_ON(IS_ERR(ref_root));
3357 if (ref_root->found_ref > 0)
3360 backref->reachable = 0;
3362 if (rec->found_ref == 0)
3368 cache = search_cache_extent(root_cache, 0);
3372 rec = container_of(cache, struct root_record, cache);
3373 cache = next_cache_extent(cache);
3375 if (rec->found_ref == 0 &&
3376 rec->objectid >= BTRFS_FIRST_FREE_OBJECTID &&
3377 rec->objectid <= BTRFS_LAST_FREE_OBJECTID) {
3378 ret = check_orphan_item(root->fs_info->tree_root,
3384 * If we don't have a root item then we likely just have
3385 * a dir item in a snapshot for this root but no actual
3386 * ref key or anything so it's meaningless.
3388 if (!rec->found_root_item)
3391 fprintf(stderr, "fs tree %llu not referenced\n",
3392 (unsigned long long)rec->objectid);
3396 if (rec->found_ref > 0 && !rec->found_root_item)
3398 list_for_each_entry(backref, &rec->backrefs, list) {
3399 if (!backref->found_dir_item)
3400 backref->errors |= REF_ERR_NO_DIR_ITEM;
3401 if (!backref->found_dir_index)
3402 backref->errors |= REF_ERR_NO_DIR_INDEX;
3403 if (!backref->found_back_ref)
3404 backref->errors |= REF_ERR_NO_ROOT_BACKREF;
3405 if (!backref->found_forward_ref)
3406 backref->errors |= REF_ERR_NO_ROOT_REF;
3407 if (backref->reachable && backref->errors)
3414 fprintf(stderr, "fs tree %llu refs %u %s\n",
3415 (unsigned long long)rec->objectid, rec->found_ref,
3416 rec->found_root_item ? "" : "not found");
3418 list_for_each_entry(backref, &rec->backrefs, list) {
3419 if (!backref->reachable)
3421 if (!backref->errors && rec->found_root_item)
3423 fprintf(stderr, "\tunresolved ref root %llu dir %llu"
3424 " index %llu namelen %u name %s errors %x\n",
3425 (unsigned long long)backref->ref_root,
3426 (unsigned long long)backref->dir,
3427 (unsigned long long)backref->index,
3428 backref->namelen, backref->name,
3430 print_ref_error(backref->errors);
3433 return errors > 0 ? 1 : 0;
3436 static int process_root_ref(struct extent_buffer *eb, int slot,
3437 struct btrfs_key *key,
3438 struct cache_tree *root_cache)
3444 struct btrfs_root_ref *ref;
3445 char namebuf[BTRFS_NAME_LEN];
3448 ref = btrfs_item_ptr(eb, slot, struct btrfs_root_ref);
3450 dirid = btrfs_root_ref_dirid(eb, ref);
3451 index = btrfs_root_ref_sequence(eb, ref);
3452 name_len = btrfs_root_ref_name_len(eb, ref);
3454 if (name_len <= BTRFS_NAME_LEN) {
3458 len = BTRFS_NAME_LEN;
3459 error = REF_ERR_NAME_TOO_LONG;
3461 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
3463 if (key->type == BTRFS_ROOT_REF_KEY) {
3464 add_root_backref(root_cache, key->offset, key->objectid, dirid,
3465 index, namebuf, len, key->type, error);
3467 add_root_backref(root_cache, key->objectid, key->offset, dirid,
3468 index, namebuf, len, key->type, error);
3473 static void free_corrupt_block(struct cache_extent *cache)
3475 struct btrfs_corrupt_block *corrupt;
3477 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
3481 FREE_EXTENT_CACHE_BASED_TREE(corrupt_blocks, free_corrupt_block);
3484 * Repair the btree of the given root.
3486 * The fix is to remove the node key in corrupt_blocks cache_tree.
3487 * and rebalance the tree.
3488 * After the fix, the btree should be writeable.
3490 static int repair_btree(struct btrfs_root *root,
3491 struct cache_tree *corrupt_blocks)
3493 struct btrfs_trans_handle *trans;
3494 struct btrfs_path *path;
3495 struct btrfs_corrupt_block *corrupt;
3496 struct cache_extent *cache;
3497 struct btrfs_key key;
3502 if (cache_tree_empty(corrupt_blocks))
3505 path = btrfs_alloc_path();
3509 trans = btrfs_start_transaction(root, 1);
3510 if (IS_ERR(trans)) {
3511 ret = PTR_ERR(trans);
3512 fprintf(stderr, "Error starting transaction: %s\n",
3516 cache = first_cache_extent(corrupt_blocks);
3518 corrupt = container_of(cache, struct btrfs_corrupt_block,
3520 level = corrupt->level;
3521 path->lowest_level = level;
3522 key.objectid = corrupt->key.objectid;
3523 key.type = corrupt->key.type;
3524 key.offset = corrupt->key.offset;
3527 * Here we don't want to do any tree balance, since it may
3528 * cause a balance with corrupted brother leaf/node,
3529 * so ins_len set to 0 here.
3530 * Balance will be done after all corrupt node/leaf is deleted.
3532 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3535 offset = btrfs_node_blockptr(path->nodes[level],
3536 path->slots[level]);
3538 /* Remove the ptr */
3539 ret = btrfs_del_ptr(trans, root, path, level,
3540 path->slots[level]);
3544 * Remove the corresponding extent
3545 * return value is not concerned.
3547 btrfs_release_path(path);
3548 ret = btrfs_free_extent(trans, root, offset, root->nodesize,
3549 0, root->root_key.objectid,
3551 cache = next_cache_extent(cache);
3554 /* Balance the btree using btrfs_search_slot() */
3555 cache = first_cache_extent(corrupt_blocks);
3557 corrupt = container_of(cache, struct btrfs_corrupt_block,
3559 memcpy(&key, &corrupt->key, sizeof(key));
3560 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3563 /* return will always >0 since it won't find the item */
3565 btrfs_release_path(path);
3566 cache = next_cache_extent(cache);
3569 btrfs_commit_transaction(trans, root);
3571 btrfs_free_path(path);
3575 static int check_fs_root(struct btrfs_root *root,
3576 struct cache_tree *root_cache,
3577 struct walk_control *wc)
3583 struct btrfs_path path;
3584 struct shared_node root_node;
3585 struct root_record *rec;
3586 struct btrfs_root_item *root_item = &root->root_item;
3587 struct cache_tree corrupt_blocks;
3588 struct orphan_data_extent *orphan;
3589 struct orphan_data_extent *tmp;
3590 enum btrfs_tree_block_status status;
3591 struct node_refs nrefs;
3594 * Reuse the corrupt_block cache tree to record corrupted tree block
3596 * Unlike the usage in extent tree check, here we do it in a per
3597 * fs/subvol tree base.
3599 cache_tree_init(&corrupt_blocks);
3600 root->fs_info->corrupt_blocks = &corrupt_blocks;
3602 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
3603 rec = get_root_rec(root_cache, root->root_key.objectid);
3604 BUG_ON(IS_ERR(rec));
3605 if (btrfs_root_refs(root_item) > 0)
3606 rec->found_root_item = 1;
3609 btrfs_init_path(&path);
3610 memset(&root_node, 0, sizeof(root_node));
3611 cache_tree_init(&root_node.root_cache);
3612 cache_tree_init(&root_node.inode_cache);
3613 memset(&nrefs, 0, sizeof(nrefs));
3615 /* Move the orphan extent record to corresponding inode_record */
3616 list_for_each_entry_safe(orphan, tmp,
3617 &root->orphan_data_extents, list) {
3618 struct inode_record *inode;
3620 inode = get_inode_rec(&root_node.inode_cache, orphan->objectid,
3622 BUG_ON(IS_ERR(inode));
3623 inode->errors |= I_ERR_FILE_EXTENT_ORPHAN;
3624 list_move(&orphan->list, &inode->orphan_extents);
3627 level = btrfs_header_level(root->node);
3628 memset(wc->nodes, 0, sizeof(wc->nodes));
3629 wc->nodes[level] = &root_node;
3630 wc->active_node = level;
3631 wc->root_level = level;
3633 /* We may not have checked the root block, lets do that now */
3634 if (btrfs_is_leaf(root->node))
3635 status = btrfs_check_leaf(root, NULL, root->node);
3637 status = btrfs_check_node(root, NULL, root->node);
3638 if (status != BTRFS_TREE_BLOCK_CLEAN)
3641 if (btrfs_root_refs(root_item) > 0 ||
3642 btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3643 path.nodes[level] = root->node;
3644 extent_buffer_get(root->node);
3645 path.slots[level] = 0;
3647 struct btrfs_key key;
3648 struct btrfs_disk_key found_key;
3650 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3651 level = root_item->drop_level;
3652 path.lowest_level = level;
3653 if (level > btrfs_header_level(root->node) ||
3654 level >= BTRFS_MAX_LEVEL) {
3655 error("ignoring invalid drop level: %u", level);
3658 wret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
3661 btrfs_node_key(path.nodes[level], &found_key,
3663 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3664 sizeof(found_key)));
3668 wret = walk_down_tree(root, &path, wc, &level, &nrefs);
3674 wret = walk_up_tree(root, &path, wc, &level);
3681 btrfs_release_path(&path);
3683 if (!cache_tree_empty(&corrupt_blocks)) {
3684 struct cache_extent *cache;
3685 struct btrfs_corrupt_block *corrupt;
3687 printf("The following tree block(s) is corrupted in tree %llu:\n",
3688 root->root_key.objectid);
3689 cache = first_cache_extent(&corrupt_blocks);
3691 corrupt = container_of(cache,
3692 struct btrfs_corrupt_block,
3694 printf("\ttree block bytenr: %llu, level: %d, node key: (%llu, %u, %llu)\n",
3695 cache->start, corrupt->level,
3696 corrupt->key.objectid, corrupt->key.type,
3697 corrupt->key.offset);
3698 cache = next_cache_extent(cache);
3701 printf("Try to repair the btree for root %llu\n",
3702 root->root_key.objectid);
3703 ret = repair_btree(root, &corrupt_blocks);
3705 fprintf(stderr, "Failed to repair btree: %s\n",
3708 printf("Btree for root %llu is fixed\n",
3709 root->root_key.objectid);
3713 err = merge_root_recs(root, &root_node.root_cache, root_cache);
3717 if (root_node.current) {
3718 root_node.current->checked = 1;
3719 maybe_free_inode_rec(&root_node.inode_cache,
3723 err = check_inode_recs(root, &root_node.inode_cache);
3727 free_corrupt_blocks_tree(&corrupt_blocks);
3728 root->fs_info->corrupt_blocks = NULL;
3729 free_orphan_data_extents(&root->orphan_data_extents);
3733 static int fs_root_objectid(u64 objectid)
3735 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
3736 objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3738 return is_fstree(objectid);
3741 static int check_fs_roots(struct btrfs_root *root,
3742 struct cache_tree *root_cache)
3744 struct btrfs_path path;
3745 struct btrfs_key key;
3746 struct walk_control wc;
3747 struct extent_buffer *leaf, *tree_node;
3748 struct btrfs_root *tmp_root;
3749 struct btrfs_root *tree_root = root->fs_info->tree_root;
3753 if (ctx.progress_enabled) {
3754 ctx.tp = TASK_FS_ROOTS;
3755 task_start(ctx.info);
3759 * Just in case we made any changes to the extent tree that weren't
3760 * reflected into the free space cache yet.
3763 reset_cached_block_groups(root->fs_info);
3764 memset(&wc, 0, sizeof(wc));
3765 cache_tree_init(&wc.shared);
3766 btrfs_init_path(&path);
3771 key.type = BTRFS_ROOT_ITEM_KEY;
3772 ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
3777 tree_node = tree_root->node;
3779 if (tree_node != tree_root->node) {
3780 free_root_recs_tree(root_cache);
3781 btrfs_release_path(&path);
3784 leaf = path.nodes[0];
3785 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
3786 ret = btrfs_next_leaf(tree_root, &path);
3792 leaf = path.nodes[0];
3794 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
3795 if (key.type == BTRFS_ROOT_ITEM_KEY &&
3796 fs_root_objectid(key.objectid)) {
3797 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
3798 tmp_root = btrfs_read_fs_root_no_cache(
3799 root->fs_info, &key);
3801 key.offset = (u64)-1;
3802 tmp_root = btrfs_read_fs_root(
3803 root->fs_info, &key);
3805 if (IS_ERR(tmp_root)) {
3809 ret = check_fs_root(tmp_root, root_cache, &wc);
3810 if (ret == -EAGAIN) {
3811 free_root_recs_tree(root_cache);
3812 btrfs_release_path(&path);
3817 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
3818 btrfs_free_fs_root(tmp_root);
3819 } else if (key.type == BTRFS_ROOT_REF_KEY ||
3820 key.type == BTRFS_ROOT_BACKREF_KEY) {
3821 process_root_ref(leaf, path.slots[0], &key,
3828 btrfs_release_path(&path);
3830 free_extent_cache_tree(&wc.shared);
3831 if (!cache_tree_empty(&wc.shared))
3832 fprintf(stderr, "warning line %d\n", __LINE__);
3834 task_stop(ctx.info);
3839 static int all_backpointers_checked(struct extent_record *rec, int print_errs)
3841 struct list_head *cur = rec->backrefs.next;
3842 struct extent_backref *back;
3843 struct tree_backref *tback;
3844 struct data_backref *dback;
3848 while(cur != &rec->backrefs) {
3849 back = to_extent_backref(cur);
3851 if (!back->found_extent_tree) {
3855 if (back->is_data) {
3856 dback = to_data_backref(back);
3857 fprintf(stderr, "Backref %llu %s %llu"
3858 " owner %llu offset %llu num_refs %lu"
3859 " not found in extent tree\n",
3860 (unsigned long long)rec->start,
3861 back->full_backref ?
3863 back->full_backref ?
3864 (unsigned long long)dback->parent:
3865 (unsigned long long)dback->root,
3866 (unsigned long long)dback->owner,
3867 (unsigned long long)dback->offset,
3868 (unsigned long)dback->num_refs);
3870 tback = to_tree_backref(back);
3871 fprintf(stderr, "Backref %llu parent %llu"
3872 " root %llu not found in extent tree\n",
3873 (unsigned long long)rec->start,
3874 (unsigned long long)tback->parent,
3875 (unsigned long long)tback->root);
3878 if (!back->is_data && !back->found_ref) {
3882 tback = to_tree_backref(back);
3883 fprintf(stderr, "Backref %llu %s %llu not referenced back %p\n",
3884 (unsigned long long)rec->start,
3885 back->full_backref ? "parent" : "root",
3886 back->full_backref ?
3887 (unsigned long long)tback->parent :
3888 (unsigned long long)tback->root, back);
3890 if (back->is_data) {
3891 dback = to_data_backref(back);
3892 if (dback->found_ref != dback->num_refs) {
3896 fprintf(stderr, "Incorrect local backref count"
3897 " on %llu %s %llu owner %llu"
3898 " offset %llu found %u wanted %u back %p\n",
3899 (unsigned long long)rec->start,
3900 back->full_backref ?
3902 back->full_backref ?
3903 (unsigned long long)dback->parent:
3904 (unsigned long long)dback->root,
3905 (unsigned long long)dback->owner,
3906 (unsigned long long)dback->offset,
3907 dback->found_ref, dback->num_refs, back);
3909 if (dback->disk_bytenr != rec->start) {
3913 fprintf(stderr, "Backref disk bytenr does not"
3914 " match extent record, bytenr=%llu, "
3915 "ref bytenr=%llu\n",
3916 (unsigned long long)rec->start,
3917 (unsigned long long)dback->disk_bytenr);
3920 if (dback->bytes != rec->nr) {
3924 fprintf(stderr, "Backref bytes do not match "
3925 "extent backref, bytenr=%llu, ref "
3926 "bytes=%llu, backref bytes=%llu\n",
3927 (unsigned long long)rec->start,
3928 (unsigned long long)rec->nr,
3929 (unsigned long long)dback->bytes);
3932 if (!back->is_data) {
3935 dback = to_data_backref(back);
3936 found += dback->found_ref;
3939 if (found != rec->refs) {
3943 fprintf(stderr, "Incorrect global backref count "
3944 "on %llu found %llu wanted %llu\n",
3945 (unsigned long long)rec->start,
3946 (unsigned long long)found,
3947 (unsigned long long)rec->refs);
3953 static int free_all_extent_backrefs(struct extent_record *rec)
3955 struct extent_backref *back;
3956 struct list_head *cur;
3957 while (!list_empty(&rec->backrefs)) {
3958 cur = rec->backrefs.next;
3959 back = to_extent_backref(cur);
3966 static void free_extent_record_cache(struct btrfs_fs_info *fs_info,
3967 struct cache_tree *extent_cache)
3969 struct cache_extent *cache;
3970 struct extent_record *rec;
3973 cache = first_cache_extent(extent_cache);
3976 rec = container_of(cache, struct extent_record, cache);
3977 remove_cache_extent(extent_cache, cache);
3978 free_all_extent_backrefs(rec);
3983 static int maybe_free_extent_rec(struct cache_tree *extent_cache,
3984 struct extent_record *rec)
3986 if (rec->content_checked && rec->owner_ref_checked &&
3987 rec->extent_item_refs == rec->refs && rec->refs > 0 &&
3988 rec->num_duplicates == 0 && !all_backpointers_checked(rec, 0) &&
3989 !rec->bad_full_backref && !rec->crossing_stripes &&
3990 !rec->wrong_chunk_type) {
3991 remove_cache_extent(extent_cache, &rec->cache);
3992 free_all_extent_backrefs(rec);
3993 list_del_init(&rec->list);
3999 static int check_owner_ref(struct btrfs_root *root,
4000 struct extent_record *rec,
4001 struct extent_buffer *buf)
4003 struct extent_backref *node;
4004 struct tree_backref *back;
4005 struct btrfs_root *ref_root;
4006 struct btrfs_key key;
4007 struct btrfs_path path;
4008 struct extent_buffer *parent;
4013 list_for_each_entry(node, &rec->backrefs, list) {
4016 if (!node->found_ref)
4018 if (node->full_backref)
4020 back = to_tree_backref(node);
4021 if (btrfs_header_owner(buf) == back->root)
4024 BUG_ON(rec->is_root);
4026 /* try to find the block by search corresponding fs tree */
4027 key.objectid = btrfs_header_owner(buf);
4028 key.type = BTRFS_ROOT_ITEM_KEY;
4029 key.offset = (u64)-1;
4031 ref_root = btrfs_read_fs_root(root->fs_info, &key);
4032 if (IS_ERR(ref_root))
4035 level = btrfs_header_level(buf);
4037 btrfs_item_key_to_cpu(buf, &key, 0);
4039 btrfs_node_key_to_cpu(buf, &key, 0);
4041 btrfs_init_path(&path);
4042 path.lowest_level = level + 1;
4043 ret = btrfs_search_slot(NULL, ref_root, &key, &path, 0, 0);
4047 parent = path.nodes[level + 1];
4048 if (parent && buf->start == btrfs_node_blockptr(parent,
4049 path.slots[level + 1]))
4052 btrfs_release_path(&path);
4053 return found ? 0 : 1;
4056 static int is_extent_tree_record(struct extent_record *rec)
4058 struct list_head *cur = rec->backrefs.next;
4059 struct extent_backref *node;
4060 struct tree_backref *back;
4063 while(cur != &rec->backrefs) {
4064 node = to_extent_backref(cur);
4068 back = to_tree_backref(node);
4069 if (node->full_backref)
4071 if (back->root == BTRFS_EXTENT_TREE_OBJECTID)
4078 static int record_bad_block_io(struct btrfs_fs_info *info,
4079 struct cache_tree *extent_cache,
4082 struct extent_record *rec;
4083 struct cache_extent *cache;
4084 struct btrfs_key key;
4086 cache = lookup_cache_extent(extent_cache, start, len);
4090 rec = container_of(cache, struct extent_record, cache);
4091 if (!is_extent_tree_record(rec))
4094 btrfs_disk_key_to_cpu(&key, &rec->parent_key);
4095 return btrfs_add_corrupt_extent_record(info, &key, start, len, 0);
4098 static int swap_values(struct btrfs_root *root, struct btrfs_path *path,
4099 struct extent_buffer *buf, int slot)
4101 if (btrfs_header_level(buf)) {
4102 struct btrfs_key_ptr ptr1, ptr2;
4104 read_extent_buffer(buf, &ptr1, btrfs_node_key_ptr_offset(slot),
4105 sizeof(struct btrfs_key_ptr));
4106 read_extent_buffer(buf, &ptr2,
4107 btrfs_node_key_ptr_offset(slot + 1),
4108 sizeof(struct btrfs_key_ptr));
4109 write_extent_buffer(buf, &ptr1,
4110 btrfs_node_key_ptr_offset(slot + 1),
4111 sizeof(struct btrfs_key_ptr));
4112 write_extent_buffer(buf, &ptr2,
4113 btrfs_node_key_ptr_offset(slot),
4114 sizeof(struct btrfs_key_ptr));
4116 struct btrfs_disk_key key;
4117 btrfs_node_key(buf, &key, 0);
4118 btrfs_fixup_low_keys(root, path, &key,
4119 btrfs_header_level(buf) + 1);
4122 struct btrfs_item *item1, *item2;
4123 struct btrfs_key k1, k2;
4124 char *item1_data, *item2_data;
4125 u32 item1_offset, item2_offset, item1_size, item2_size;
4127 item1 = btrfs_item_nr(slot);
4128 item2 = btrfs_item_nr(slot + 1);
4129 btrfs_item_key_to_cpu(buf, &k1, slot);
4130 btrfs_item_key_to_cpu(buf, &k2, slot + 1);
4131 item1_offset = btrfs_item_offset(buf, item1);
4132 item2_offset = btrfs_item_offset(buf, item2);
4133 item1_size = btrfs_item_size(buf, item1);
4134 item2_size = btrfs_item_size(buf, item2);
4136 item1_data = malloc(item1_size);
4139 item2_data = malloc(item2_size);
4145 read_extent_buffer(buf, item1_data, item1_offset, item1_size);
4146 read_extent_buffer(buf, item2_data, item2_offset, item2_size);
4148 write_extent_buffer(buf, item1_data, item2_offset, item2_size);
4149 write_extent_buffer(buf, item2_data, item1_offset, item1_size);
4153 btrfs_set_item_offset(buf, item1, item2_offset);
4154 btrfs_set_item_offset(buf, item2, item1_offset);
4155 btrfs_set_item_size(buf, item1, item2_size);
4156 btrfs_set_item_size(buf, item2, item1_size);
4158 path->slots[0] = slot;
4159 btrfs_set_item_key_unsafe(root, path, &k2);
4160 path->slots[0] = slot + 1;
4161 btrfs_set_item_key_unsafe(root, path, &k1);
4166 static int fix_key_order(struct btrfs_trans_handle *trans,
4167 struct btrfs_root *root,
4168 struct btrfs_path *path)
4170 struct extent_buffer *buf;
4171 struct btrfs_key k1, k2;
4173 int level = path->lowest_level;
4176 buf = path->nodes[level];
4177 for (i = 0; i < btrfs_header_nritems(buf) - 1; i++) {
4179 btrfs_node_key_to_cpu(buf, &k1, i);
4180 btrfs_node_key_to_cpu(buf, &k2, i + 1);
4182 btrfs_item_key_to_cpu(buf, &k1, i);
4183 btrfs_item_key_to_cpu(buf, &k2, i + 1);
4185 if (btrfs_comp_cpu_keys(&k1, &k2) < 0)
4187 ret = swap_values(root, path, buf, i);
4190 btrfs_mark_buffer_dirty(buf);
4196 static int delete_bogus_item(struct btrfs_trans_handle *trans,
4197 struct btrfs_root *root,
4198 struct btrfs_path *path,
4199 struct extent_buffer *buf, int slot)
4201 struct btrfs_key key;
4202 int nritems = btrfs_header_nritems(buf);
4204 btrfs_item_key_to_cpu(buf, &key, slot);
4206 /* These are all the keys we can deal with missing. */
4207 if (key.type != BTRFS_DIR_INDEX_KEY &&
4208 key.type != BTRFS_EXTENT_ITEM_KEY &&
4209 key.type != BTRFS_METADATA_ITEM_KEY &&
4210 key.type != BTRFS_TREE_BLOCK_REF_KEY &&
4211 key.type != BTRFS_EXTENT_DATA_REF_KEY)
4214 printf("Deleting bogus item [%llu,%u,%llu] at slot %d on block %llu\n",
4215 (unsigned long long)key.objectid, key.type,
4216 (unsigned long long)key.offset, slot, buf->start);
4217 memmove_extent_buffer(buf, btrfs_item_nr_offset(slot),
4218 btrfs_item_nr_offset(slot + 1),
4219 sizeof(struct btrfs_item) *
4220 (nritems - slot - 1));
4221 btrfs_set_header_nritems(buf, nritems - 1);
4223 struct btrfs_disk_key disk_key;
4225 btrfs_item_key(buf, &disk_key, 0);
4226 btrfs_fixup_low_keys(root, path, &disk_key, 1);
4228 btrfs_mark_buffer_dirty(buf);
4232 static int fix_item_offset(struct btrfs_trans_handle *trans,
4233 struct btrfs_root *root,
4234 struct btrfs_path *path)
4236 struct extent_buffer *buf;
4240 /* We should only get this for leaves */
4241 BUG_ON(path->lowest_level);
4242 buf = path->nodes[0];
4244 for (i = 0; i < btrfs_header_nritems(buf); i++) {
4245 unsigned int shift = 0, offset;
4247 if (i == 0 && btrfs_item_end_nr(buf, i) !=
4248 BTRFS_LEAF_DATA_SIZE(root)) {
4249 if (btrfs_item_end_nr(buf, i) >
4250 BTRFS_LEAF_DATA_SIZE(root)) {
4251 ret = delete_bogus_item(trans, root, path,
4255 fprintf(stderr, "item is off the end of the "
4256 "leaf, can't fix\n");
4260 shift = BTRFS_LEAF_DATA_SIZE(root) -
4261 btrfs_item_end_nr(buf, i);
4262 } else if (i > 0 && btrfs_item_end_nr(buf, i) !=
4263 btrfs_item_offset_nr(buf, i - 1)) {
4264 if (btrfs_item_end_nr(buf, i) >
4265 btrfs_item_offset_nr(buf, i - 1)) {
4266 ret = delete_bogus_item(trans, root, path,
4270 fprintf(stderr, "items overlap, can't fix\n");
4274 shift = btrfs_item_offset_nr(buf, i - 1) -
4275 btrfs_item_end_nr(buf, i);
4280 printf("Shifting item nr %d by %u bytes in block %llu\n",
4281 i, shift, (unsigned long long)buf->start);
4282 offset = btrfs_item_offset_nr(buf, i);
4283 memmove_extent_buffer(buf,
4284 btrfs_leaf_data(buf) + offset + shift,
4285 btrfs_leaf_data(buf) + offset,
4286 btrfs_item_size_nr(buf, i));
4287 btrfs_set_item_offset(buf, btrfs_item_nr(i),
4289 btrfs_mark_buffer_dirty(buf);
4293 * We may have moved things, in which case we want to exit so we don't
4294 * write those changes out. Once we have proper abort functionality in
4295 * progs this can be changed to something nicer.
4302 * Attempt to fix basic block failures. If we can't fix it for whatever reason
4303 * then just return -EIO.
4305 static int try_to_fix_bad_block(struct btrfs_root *root,
4306 struct extent_buffer *buf,
4307 enum btrfs_tree_block_status status)
4309 struct btrfs_trans_handle *trans;
4310 struct ulist *roots;
4311 struct ulist_node *node;
4312 struct btrfs_root *search_root;
4313 struct btrfs_path *path;
4314 struct ulist_iterator iter;
4315 struct btrfs_key root_key, key;
4318 if (status != BTRFS_TREE_BLOCK_BAD_KEY_ORDER &&
4319 status != BTRFS_TREE_BLOCK_INVALID_OFFSETS)
4322 path = btrfs_alloc_path();
4326 ret = btrfs_find_all_roots(NULL, root->fs_info, buf->start,
4329 btrfs_free_path(path);
4333 ULIST_ITER_INIT(&iter);
4334 while ((node = ulist_next(roots, &iter))) {
4335 root_key.objectid = node->val;
4336 root_key.type = BTRFS_ROOT_ITEM_KEY;
4337 root_key.offset = (u64)-1;
4339 search_root = btrfs_read_fs_root(root->fs_info, &root_key);
4346 trans = btrfs_start_transaction(search_root, 0);
4347 if (IS_ERR(trans)) {
4348 ret = PTR_ERR(trans);
4352 path->lowest_level = btrfs_header_level(buf);
4353 path->skip_check_block = 1;
4354 if (path->lowest_level)
4355 btrfs_node_key_to_cpu(buf, &key, 0);
4357 btrfs_item_key_to_cpu(buf, &key, 0);
4358 ret = btrfs_search_slot(trans, search_root, &key, path, 0, 1);
4361 btrfs_commit_transaction(trans, search_root);
4364 if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
4365 ret = fix_key_order(trans, search_root, path);
4366 else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
4367 ret = fix_item_offset(trans, search_root, path);
4369 btrfs_commit_transaction(trans, search_root);
4372 btrfs_release_path(path);
4373 btrfs_commit_transaction(trans, search_root);
4376 btrfs_free_path(path);
4380 static int check_block(struct btrfs_root *root,
4381 struct cache_tree *extent_cache,
4382 struct extent_buffer *buf, u64 flags)
4384 struct extent_record *rec;
4385 struct cache_extent *cache;
4386 struct btrfs_key key;
4387 enum btrfs_tree_block_status status;
4391 cache = lookup_cache_extent(extent_cache, buf->start, buf->len);
4394 rec = container_of(cache, struct extent_record, cache);
4395 rec->generation = btrfs_header_generation(buf);
4397 level = btrfs_header_level(buf);
4398 if (btrfs_header_nritems(buf) > 0) {
4401 btrfs_item_key_to_cpu(buf, &key, 0);
4403 btrfs_node_key_to_cpu(buf, &key, 0);
4405 rec->info_objectid = key.objectid;
4407 rec->info_level = level;
4409 if (btrfs_is_leaf(buf))
4410 status = btrfs_check_leaf(root, &rec->parent_key, buf);
4412 status = btrfs_check_node(root, &rec->parent_key, buf);
4414 if (status != BTRFS_TREE_BLOCK_CLEAN) {
4416 status = try_to_fix_bad_block(root, buf, status);
4417 if (status != BTRFS_TREE_BLOCK_CLEAN) {
4419 fprintf(stderr, "bad block %llu\n",
4420 (unsigned long long)buf->start);
4423 * Signal to callers we need to start the scan over
4424 * again since we'll have cowed blocks.
4429 rec->content_checked = 1;
4430 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4431 rec->owner_ref_checked = 1;
4433 ret = check_owner_ref(root, rec, buf);
4435 rec->owner_ref_checked = 1;
4439 maybe_free_extent_rec(extent_cache, rec);
4443 static struct tree_backref *find_tree_backref(struct extent_record *rec,
4444 u64 parent, u64 root)
4446 struct list_head *cur = rec->backrefs.next;
4447 struct extent_backref *node;
4448 struct tree_backref *back;
4450 while(cur != &rec->backrefs) {
4451 node = to_extent_backref(cur);
4455 back = to_tree_backref(node);
4457 if (!node->full_backref)
4459 if (parent == back->parent)
4462 if (node->full_backref)
4464 if (back->root == root)
4471 static struct tree_backref *alloc_tree_backref(struct extent_record *rec,
4472 u64 parent, u64 root)
4474 struct tree_backref *ref = malloc(sizeof(*ref));
4478 memset(&ref->node, 0, sizeof(ref->node));
4480 ref->parent = parent;
4481 ref->node.full_backref = 1;
4484 ref->node.full_backref = 0;
4486 list_add_tail(&ref->node.list, &rec->backrefs);
4491 static struct data_backref *find_data_backref(struct extent_record *rec,
4492 u64 parent, u64 root,
4493 u64 owner, u64 offset,
4495 u64 disk_bytenr, u64 bytes)
4497 struct list_head *cur = rec->backrefs.next;
4498 struct extent_backref *node;
4499 struct data_backref *back;
4501 while(cur != &rec->backrefs) {
4502 node = to_extent_backref(cur);
4506 back = to_data_backref(node);
4508 if (!node->full_backref)
4510 if (parent == back->parent)
4513 if (node->full_backref)
4515 if (back->root == root && back->owner == owner &&
4516 back->offset == offset) {
4517 if (found_ref && node->found_ref &&
4518 (back->bytes != bytes ||
4519 back->disk_bytenr != disk_bytenr))
4528 static struct data_backref *alloc_data_backref(struct extent_record *rec,
4529 u64 parent, u64 root,
4530 u64 owner, u64 offset,
4533 struct data_backref *ref = malloc(sizeof(*ref));
4537 memset(&ref->node, 0, sizeof(ref->node));
4538 ref->node.is_data = 1;
4541 ref->parent = parent;
4544 ref->node.full_backref = 1;
4548 ref->offset = offset;
4549 ref->node.full_backref = 0;
4551 ref->bytes = max_size;
4554 list_add_tail(&ref->node.list, &rec->backrefs);
4555 if (max_size > rec->max_size)
4556 rec->max_size = max_size;
4560 /* Check if the type of extent matches with its chunk */
4561 static void check_extent_type(struct extent_record *rec)
4563 struct btrfs_block_group_cache *bg_cache;
4565 bg_cache = btrfs_lookup_first_block_group(global_info, rec->start);
4569 /* data extent, check chunk directly*/
4570 if (!rec->metadata) {
4571 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_DATA))
4572 rec->wrong_chunk_type = 1;
4576 /* metadata extent, check the obvious case first */
4577 if (!(bg_cache->flags & (BTRFS_BLOCK_GROUP_SYSTEM |
4578 BTRFS_BLOCK_GROUP_METADATA))) {
4579 rec->wrong_chunk_type = 1;
4584 * Check SYSTEM extent, as it's also marked as metadata, we can only
4585 * make sure it's a SYSTEM extent by its backref
4587 if (!list_empty(&rec->backrefs)) {
4588 struct extent_backref *node;
4589 struct tree_backref *tback;
4592 node = to_extent_backref(rec->backrefs.next);
4593 if (node->is_data) {
4594 /* tree block shouldn't have data backref */
4595 rec->wrong_chunk_type = 1;
4598 tback = container_of(node, struct tree_backref, node);
4600 if (tback->root == BTRFS_CHUNK_TREE_OBJECTID)
4601 bg_type = BTRFS_BLOCK_GROUP_SYSTEM;
4603 bg_type = BTRFS_BLOCK_GROUP_METADATA;
4604 if (!(bg_cache->flags & bg_type))
4605 rec->wrong_chunk_type = 1;
4610 * Allocate a new extent record, fill default values from @tmpl and insert int
4611 * @extent_cache. Caller is supposed to make sure the [start,nr) is not in
4612 * the cache, otherwise it fails.
4614 static int add_extent_rec_nolookup(struct cache_tree *extent_cache,
4615 struct extent_record *tmpl)
4617 struct extent_record *rec;
4620 rec = malloc(sizeof(*rec));
4623 rec->start = tmpl->start;
4624 rec->max_size = tmpl->max_size;
4625 rec->nr = max(tmpl->nr, tmpl->max_size);
4626 rec->found_rec = tmpl->found_rec;
4627 rec->content_checked = tmpl->content_checked;
4628 rec->owner_ref_checked = tmpl->owner_ref_checked;
4629 rec->num_duplicates = 0;
4630 rec->metadata = tmpl->metadata;
4631 rec->flag_block_full_backref = FLAG_UNSET;
4632 rec->bad_full_backref = 0;
4633 rec->crossing_stripes = 0;
4634 rec->wrong_chunk_type = 0;
4635 rec->is_root = tmpl->is_root;
4636 rec->refs = tmpl->refs;
4637 rec->extent_item_refs = tmpl->extent_item_refs;
4638 rec->parent_generation = tmpl->parent_generation;
4639 INIT_LIST_HEAD(&rec->backrefs);
4640 INIT_LIST_HEAD(&rec->dups);
4641 INIT_LIST_HEAD(&rec->list);
4642 memcpy(&rec->parent_key, &tmpl->parent_key, sizeof(tmpl->parent_key));
4643 rec->cache.start = tmpl->start;
4644 rec->cache.size = tmpl->nr;
4645 ret = insert_cache_extent(extent_cache, &rec->cache);
4650 bytes_used += rec->nr;
4653 rec->crossing_stripes = check_crossing_stripes(rec->start,
4654 global_info->tree_root->nodesize);
4655 check_extent_type(rec);
4660 * Lookup and modify an extent, some values of @tmpl are interpreted verbatim,
4662 * - refs - if found, increase refs
4663 * - is_root - if found, set
4664 * - content_checked - if found, set
4665 * - owner_ref_checked - if found, set
4667 * If not found, create a new one, initialize and insert.
4669 static int add_extent_rec(struct cache_tree *extent_cache,
4670 struct extent_record *tmpl)
4672 struct extent_record *rec;
4673 struct cache_extent *cache;
4677 cache = lookup_cache_extent(extent_cache, tmpl->start, tmpl->nr);
4679 rec = container_of(cache, struct extent_record, cache);
4683 rec->nr = max(tmpl->nr, tmpl->max_size);
4686 * We need to make sure to reset nr to whatever the extent
4687 * record says was the real size, this way we can compare it to
4690 if (tmpl->found_rec) {
4691 if (tmpl->start != rec->start || rec->found_rec) {
4692 struct extent_record *tmp;
4695 if (list_empty(&rec->list))
4696 list_add_tail(&rec->list,
4697 &duplicate_extents);
4700 * We have to do this song and dance in case we
4701 * find an extent record that falls inside of
4702 * our current extent record but does not have
4703 * the same objectid.
4705 tmp = malloc(sizeof(*tmp));
4708 tmp->start = tmpl->start;
4709 tmp->max_size = tmpl->max_size;
4712 tmp->metadata = tmpl->metadata;
4713 tmp->extent_item_refs = tmpl->extent_item_refs;
4714 INIT_LIST_HEAD(&tmp->list);
4715 list_add_tail(&tmp->list, &rec->dups);
4716 rec->num_duplicates++;
4723 if (tmpl->extent_item_refs && !dup) {
4724 if (rec->extent_item_refs) {
4725 fprintf(stderr, "block %llu rec "
4726 "extent_item_refs %llu, passed %llu\n",
4727 (unsigned long long)tmpl->start,
4728 (unsigned long long)
4729 rec->extent_item_refs,
4730 (unsigned long long)tmpl->extent_item_refs);
4732 rec->extent_item_refs = tmpl->extent_item_refs;
4736 if (tmpl->content_checked)
4737 rec->content_checked = 1;
4738 if (tmpl->owner_ref_checked)
4739 rec->owner_ref_checked = 1;
4740 memcpy(&rec->parent_key, &tmpl->parent_key,
4741 sizeof(tmpl->parent_key));
4742 if (tmpl->parent_generation)
4743 rec->parent_generation = tmpl->parent_generation;
4744 if (rec->max_size < tmpl->max_size)
4745 rec->max_size = tmpl->max_size;
4748 * A metadata extent can't cross stripe_len boundary, otherwise
4749 * kernel scrub won't be able to handle it.
4750 * As now stripe_len is fixed to BTRFS_STRIPE_LEN, just check
4754 rec->crossing_stripes = check_crossing_stripes(
4755 rec->start, global_info->tree_root->nodesize);
4756 check_extent_type(rec);
4757 maybe_free_extent_rec(extent_cache, rec);
4761 ret = add_extent_rec_nolookup(extent_cache, tmpl);
4766 static int add_tree_backref(struct cache_tree *extent_cache, u64 bytenr,
4767 u64 parent, u64 root, int found_ref)
4769 struct extent_record *rec;
4770 struct tree_backref *back;
4771 struct cache_extent *cache;
4774 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4776 struct extent_record tmpl;
4778 memset(&tmpl, 0, sizeof(tmpl));
4779 tmpl.start = bytenr;
4783 ret = add_extent_rec_nolookup(extent_cache, &tmpl);
4787 /* really a bug in cache_extent implement now */
4788 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4793 rec = container_of(cache, struct extent_record, cache);
4794 if (rec->start != bytenr) {
4796 * Several cause, from unaligned bytenr to over lapping extents
4801 back = find_tree_backref(rec, parent, root);
4803 back = alloc_tree_backref(rec, parent, root);
4809 if (back->node.found_ref) {
4810 fprintf(stderr, "Extent back ref already exists "
4811 "for %llu parent %llu root %llu \n",
4812 (unsigned long long)bytenr,
4813 (unsigned long long)parent,
4814 (unsigned long long)root);
4816 back->node.found_ref = 1;
4818 if (back->node.found_extent_tree) {
4819 fprintf(stderr, "Extent back ref already exists "
4820 "for %llu parent %llu root %llu \n",
4821 (unsigned long long)bytenr,
4822 (unsigned long long)parent,
4823 (unsigned long long)root);
4825 back->node.found_extent_tree = 1;
4827 check_extent_type(rec);
4828 maybe_free_extent_rec(extent_cache, rec);
4832 static int add_data_backref(struct cache_tree *extent_cache, u64 bytenr,
4833 u64 parent, u64 root, u64 owner, u64 offset,
4834 u32 num_refs, int found_ref, u64 max_size)
4836 struct extent_record *rec;
4837 struct data_backref *back;
4838 struct cache_extent *cache;
4841 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4843 struct extent_record tmpl;
4845 memset(&tmpl, 0, sizeof(tmpl));
4846 tmpl.start = bytenr;
4848 tmpl.max_size = max_size;
4850 ret = add_extent_rec_nolookup(extent_cache, &tmpl);
4854 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4859 rec = container_of(cache, struct extent_record, cache);
4860 if (rec->max_size < max_size)
4861 rec->max_size = max_size;
4864 * If found_ref is set then max_size is the real size and must match the
4865 * existing refs. So if we have already found a ref then we need to
4866 * make sure that this ref matches the existing one, otherwise we need
4867 * to add a new backref so we can notice that the backrefs don't match
4868 * and we need to figure out who is telling the truth. This is to
4869 * account for that awful fsync bug I introduced where we'd end up with
4870 * a btrfs_file_extent_item that would have its length include multiple
4871 * prealloc extents or point inside of a prealloc extent.
4873 back = find_data_backref(rec, parent, root, owner, offset, found_ref,
4876 back = alloc_data_backref(rec, parent, root, owner, offset,
4882 BUG_ON(num_refs != 1);
4883 if (back->node.found_ref)
4884 BUG_ON(back->bytes != max_size);
4885 back->node.found_ref = 1;
4886 back->found_ref += 1;
4887 back->bytes = max_size;
4888 back->disk_bytenr = bytenr;
4890 rec->content_checked = 1;
4891 rec->owner_ref_checked = 1;
4893 if (back->node.found_extent_tree) {
4894 fprintf(stderr, "Extent back ref already exists "
4895 "for %llu parent %llu root %llu "
4896 "owner %llu offset %llu num_refs %lu\n",
4897 (unsigned long long)bytenr,
4898 (unsigned long long)parent,
4899 (unsigned long long)root,
4900 (unsigned long long)owner,
4901 (unsigned long long)offset,
4902 (unsigned long)num_refs);
4904 back->num_refs = num_refs;
4905 back->node.found_extent_tree = 1;
4907 maybe_free_extent_rec(extent_cache, rec);
4911 static int add_pending(struct cache_tree *pending,
4912 struct cache_tree *seen, u64 bytenr, u32 size)
4915 ret = add_cache_extent(seen, bytenr, size);
4918 add_cache_extent(pending, bytenr, size);
4922 static int pick_next_pending(struct cache_tree *pending,
4923 struct cache_tree *reada,
4924 struct cache_tree *nodes,
4925 u64 last, struct block_info *bits, int bits_nr,
4928 unsigned long node_start = last;
4929 struct cache_extent *cache;
4932 cache = search_cache_extent(reada, 0);
4934 bits[0].start = cache->start;
4935 bits[0].size = cache->size;
4940 if (node_start > 32768)
4941 node_start -= 32768;
4943 cache = search_cache_extent(nodes, node_start);
4945 cache = search_cache_extent(nodes, 0);
4948 cache = search_cache_extent(pending, 0);
4953 bits[ret].start = cache->start;
4954 bits[ret].size = cache->size;
4955 cache = next_cache_extent(cache);
4957 } while (cache && ret < bits_nr);
4963 bits[ret].start = cache->start;
4964 bits[ret].size = cache->size;
4965 cache = next_cache_extent(cache);
4967 } while (cache && ret < bits_nr);
4969 if (bits_nr - ret > 8) {
4970 u64 lookup = bits[0].start + bits[0].size;
4971 struct cache_extent *next;
4972 next = search_cache_extent(pending, lookup);
4974 if (next->start - lookup > 32768)
4976 bits[ret].start = next->start;
4977 bits[ret].size = next->size;
4978 lookup = next->start + next->size;
4982 next = next_cache_extent(next);
4990 static void free_chunk_record(struct cache_extent *cache)
4992 struct chunk_record *rec;
4994 rec = container_of(cache, struct chunk_record, cache);
4995 list_del_init(&rec->list);
4996 list_del_init(&rec->dextents);
5000 void free_chunk_cache_tree(struct cache_tree *chunk_cache)
5002 cache_tree_free_extents(chunk_cache, free_chunk_record);
5005 static void free_device_record(struct rb_node *node)
5007 struct device_record *rec;
5009 rec = container_of(node, struct device_record, node);
5013 FREE_RB_BASED_TREE(device_cache, free_device_record);
5015 int insert_block_group_record(struct block_group_tree *tree,
5016 struct block_group_record *bg_rec)
5020 ret = insert_cache_extent(&tree->tree, &bg_rec->cache);
5024 list_add_tail(&bg_rec->list, &tree->block_groups);
5028 static void free_block_group_record(struct cache_extent *cache)
5030 struct block_group_record *rec;
5032 rec = container_of(cache, struct block_group_record, cache);
5033 list_del_init(&rec->list);
5037 void free_block_group_tree(struct block_group_tree *tree)
5039 cache_tree_free_extents(&tree->tree, free_block_group_record);
5042 int insert_device_extent_record(struct device_extent_tree *tree,
5043 struct device_extent_record *de_rec)
5048 * Device extent is a bit different from the other extents, because
5049 * the extents which belong to the different devices may have the
5050 * same start and size, so we need use the special extent cache
5051 * search/insert functions.
5053 ret = insert_cache_extent2(&tree->tree, &de_rec->cache);
5057 list_add_tail(&de_rec->chunk_list, &tree->no_chunk_orphans);
5058 list_add_tail(&de_rec->device_list, &tree->no_device_orphans);
5062 static void free_device_extent_record(struct cache_extent *cache)
5064 struct device_extent_record *rec;
5066 rec = container_of(cache, struct device_extent_record, cache);
5067 if (!list_empty(&rec->chunk_list))
5068 list_del_init(&rec->chunk_list);
5069 if (!list_empty(&rec->device_list))
5070 list_del_init(&rec->device_list);
5074 void free_device_extent_tree(struct device_extent_tree *tree)
5076 cache_tree_free_extents(&tree->tree, free_device_extent_record);
5079 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5080 static int process_extent_ref_v0(struct cache_tree *extent_cache,
5081 struct extent_buffer *leaf, int slot)
5083 struct btrfs_extent_ref_v0 *ref0;
5084 struct btrfs_key key;
5087 btrfs_item_key_to_cpu(leaf, &key, slot);
5088 ref0 = btrfs_item_ptr(leaf, slot, struct btrfs_extent_ref_v0);
5089 if (btrfs_ref_objectid_v0(leaf, ref0) < BTRFS_FIRST_FREE_OBJECTID) {
5090 ret = add_tree_backref(extent_cache, key.objectid, key.offset,
5093 ret = add_data_backref(extent_cache, key.objectid, key.offset,
5094 0, 0, 0, btrfs_ref_count_v0(leaf, ref0), 0, 0);
5100 struct chunk_record *btrfs_new_chunk_record(struct extent_buffer *leaf,
5101 struct btrfs_key *key,
5104 struct btrfs_chunk *ptr;
5105 struct chunk_record *rec;
5108 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5109 num_stripes = btrfs_chunk_num_stripes(leaf, ptr);
5111 rec = calloc(1, btrfs_chunk_record_size(num_stripes));
5113 fprintf(stderr, "memory allocation failed\n");
5117 INIT_LIST_HEAD(&rec->list);
5118 INIT_LIST_HEAD(&rec->dextents);
5121 rec->cache.start = key->offset;
5122 rec->cache.size = btrfs_chunk_length(leaf, ptr);
5124 rec->generation = btrfs_header_generation(leaf);
5126 rec->objectid = key->objectid;
5127 rec->type = key->type;
5128 rec->offset = key->offset;
5130 rec->length = rec->cache.size;
5131 rec->owner = btrfs_chunk_owner(leaf, ptr);
5132 rec->stripe_len = btrfs_chunk_stripe_len(leaf, ptr);
5133 rec->type_flags = btrfs_chunk_type(leaf, ptr);
5134 rec->io_width = btrfs_chunk_io_width(leaf, ptr);
5135 rec->io_align = btrfs_chunk_io_align(leaf, ptr);
5136 rec->sector_size = btrfs_chunk_sector_size(leaf, ptr);
5137 rec->num_stripes = num_stripes;
5138 rec->sub_stripes = btrfs_chunk_sub_stripes(leaf, ptr);
5140 for (i = 0; i < rec->num_stripes; ++i) {
5141 rec->stripes[i].devid =
5142 btrfs_stripe_devid_nr(leaf, ptr, i);
5143 rec->stripes[i].offset =
5144 btrfs_stripe_offset_nr(leaf, ptr, i);
5145 read_extent_buffer(leaf, rec->stripes[i].dev_uuid,
5146 (unsigned long)btrfs_stripe_dev_uuid_nr(ptr, i),
5153 static int process_chunk_item(struct cache_tree *chunk_cache,
5154 struct btrfs_key *key, struct extent_buffer *eb,
5157 struct chunk_record *rec;
5158 struct btrfs_chunk *chunk;
5161 chunk = btrfs_item_ptr(eb, slot, struct btrfs_chunk);
5163 * Do extra check for this chunk item,
5165 * It's still possible one can craft a leaf with CHUNK_ITEM, with
5166 * wrong onwer(3) out of chunk tree, to pass both chunk tree check
5167 * and owner<->key_type check.
5169 ret = btrfs_check_chunk_valid(global_info->tree_root, eb, chunk, slot,
5172 error("chunk(%llu, %llu) is not valid, ignore it",
5173 key->offset, btrfs_chunk_length(eb, chunk));
5176 rec = btrfs_new_chunk_record(eb, key, slot);
5177 ret = insert_cache_extent(chunk_cache, &rec->cache);
5179 fprintf(stderr, "Chunk[%llu, %llu] existed.\n",
5180 rec->offset, rec->length);
5187 static int process_device_item(struct rb_root *dev_cache,
5188 struct btrfs_key *key, struct extent_buffer *eb, int slot)
5190 struct btrfs_dev_item *ptr;
5191 struct device_record *rec;
5194 ptr = btrfs_item_ptr(eb,
5195 slot, struct btrfs_dev_item);
5197 rec = malloc(sizeof(*rec));
5199 fprintf(stderr, "memory allocation failed\n");
5203 rec->devid = key->offset;
5204 rec->generation = btrfs_header_generation(eb);
5206 rec->objectid = key->objectid;
5207 rec->type = key->type;
5208 rec->offset = key->offset;
5210 rec->devid = btrfs_device_id(eb, ptr);
5211 rec->total_byte = btrfs_device_total_bytes(eb, ptr);
5212 rec->byte_used = btrfs_device_bytes_used(eb, ptr);
5214 ret = rb_insert(dev_cache, &rec->node, device_record_compare);
5216 fprintf(stderr, "Device[%llu] existed.\n", rec->devid);
5223 struct block_group_record *
5224 btrfs_new_block_group_record(struct extent_buffer *leaf, struct btrfs_key *key,
5227 struct btrfs_block_group_item *ptr;
5228 struct block_group_record *rec;
5230 rec = calloc(1, sizeof(*rec));
5232 fprintf(stderr, "memory allocation failed\n");
5236 rec->cache.start = key->objectid;
5237 rec->cache.size = key->offset;
5239 rec->generation = btrfs_header_generation(leaf);
5241 rec->objectid = key->objectid;
5242 rec->type = key->type;
5243 rec->offset = key->offset;
5245 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_block_group_item);
5246 rec->flags = btrfs_disk_block_group_flags(leaf, ptr);
5248 INIT_LIST_HEAD(&rec->list);
5253 static int process_block_group_item(struct block_group_tree *block_group_cache,
5254 struct btrfs_key *key,
5255 struct extent_buffer *eb, int slot)
5257 struct block_group_record *rec;
5260 rec = btrfs_new_block_group_record(eb, key, slot);
5261 ret = insert_block_group_record(block_group_cache, rec);
5263 fprintf(stderr, "Block Group[%llu, %llu] existed.\n",
5264 rec->objectid, rec->offset);
5271 struct device_extent_record *
5272 btrfs_new_device_extent_record(struct extent_buffer *leaf,
5273 struct btrfs_key *key, int slot)
5275 struct device_extent_record *rec;
5276 struct btrfs_dev_extent *ptr;
5278 rec = calloc(1, sizeof(*rec));
5280 fprintf(stderr, "memory allocation failed\n");
5284 rec->cache.objectid = key->objectid;
5285 rec->cache.start = key->offset;
5287 rec->generation = btrfs_header_generation(leaf);
5289 rec->objectid = key->objectid;
5290 rec->type = key->type;
5291 rec->offset = key->offset;
5293 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
5294 rec->chunk_objecteid =
5295 btrfs_dev_extent_chunk_objectid(leaf, ptr);
5297 btrfs_dev_extent_chunk_offset(leaf, ptr);
5298 rec->length = btrfs_dev_extent_length(leaf, ptr);
5299 rec->cache.size = rec->length;
5301 INIT_LIST_HEAD(&rec->chunk_list);
5302 INIT_LIST_HEAD(&rec->device_list);
5308 process_device_extent_item(struct device_extent_tree *dev_extent_cache,
5309 struct btrfs_key *key, struct extent_buffer *eb,
5312 struct device_extent_record *rec;
5315 rec = btrfs_new_device_extent_record(eb, key, slot);
5316 ret = insert_device_extent_record(dev_extent_cache, rec);
5319 "Device extent[%llu, %llu, %llu] existed.\n",
5320 rec->objectid, rec->offset, rec->length);
5327 static int process_extent_item(struct btrfs_root *root,
5328 struct cache_tree *extent_cache,
5329 struct extent_buffer *eb, int slot)
5331 struct btrfs_extent_item *ei;
5332 struct btrfs_extent_inline_ref *iref;
5333 struct btrfs_extent_data_ref *dref;
5334 struct btrfs_shared_data_ref *sref;
5335 struct btrfs_key key;
5336 struct extent_record tmpl;
5341 u32 item_size = btrfs_item_size_nr(eb, slot);
5347 btrfs_item_key_to_cpu(eb, &key, slot);
5349 if (key.type == BTRFS_METADATA_ITEM_KEY) {
5351 num_bytes = root->nodesize;
5353 num_bytes = key.offset;
5356 if (!IS_ALIGNED(key.objectid, root->sectorsize)) {
5357 error("ignoring invalid extent, bytenr %llu is not aligned to %u",
5358 key.objectid, root->sectorsize);
5361 if (item_size < sizeof(*ei)) {
5362 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5363 struct btrfs_extent_item_v0 *ei0;
5364 BUG_ON(item_size != sizeof(*ei0));
5365 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
5366 refs = btrfs_extent_refs_v0(eb, ei0);
5370 memset(&tmpl, 0, sizeof(tmpl));
5371 tmpl.start = key.objectid;
5372 tmpl.nr = num_bytes;
5373 tmpl.extent_item_refs = refs;
5374 tmpl.metadata = metadata;
5376 tmpl.max_size = num_bytes;
5378 return add_extent_rec(extent_cache, &tmpl);
5381 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
5382 refs = btrfs_extent_refs(eb, ei);
5383 if (btrfs_extent_flags(eb, ei) & BTRFS_EXTENT_FLAG_TREE_BLOCK)
5387 if (metadata && num_bytes != root->nodesize) {
5388 error("ignore invalid metadata extent, length %llu does not equal to %u",
5389 num_bytes, root->nodesize);
5392 if (!metadata && !IS_ALIGNED(num_bytes, root->sectorsize)) {
5393 error("ignore invalid data extent, length %llu is not aligned to %u",
5394 num_bytes, root->sectorsize);
5398 memset(&tmpl, 0, sizeof(tmpl));
5399 tmpl.start = key.objectid;
5400 tmpl.nr = num_bytes;
5401 tmpl.extent_item_refs = refs;
5402 tmpl.metadata = metadata;
5404 tmpl.max_size = num_bytes;
5405 add_extent_rec(extent_cache, &tmpl);
5407 ptr = (unsigned long)(ei + 1);
5408 if (btrfs_extent_flags(eb, ei) & BTRFS_EXTENT_FLAG_TREE_BLOCK &&
5409 key.type == BTRFS_EXTENT_ITEM_KEY)
5410 ptr += sizeof(struct btrfs_tree_block_info);
5412 end = (unsigned long)ei + item_size;
5414 iref = (struct btrfs_extent_inline_ref *)ptr;
5415 type = btrfs_extent_inline_ref_type(eb, iref);
5416 offset = btrfs_extent_inline_ref_offset(eb, iref);
5418 case BTRFS_TREE_BLOCK_REF_KEY:
5419 ret = add_tree_backref(extent_cache, key.objectid,
5422 error("add_tree_backref failed: %s",
5425 case BTRFS_SHARED_BLOCK_REF_KEY:
5426 ret = add_tree_backref(extent_cache, key.objectid,
5429 error("add_tree_backref failed: %s",
5432 case BTRFS_EXTENT_DATA_REF_KEY:
5433 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
5434 add_data_backref(extent_cache, key.objectid, 0,
5435 btrfs_extent_data_ref_root(eb, dref),
5436 btrfs_extent_data_ref_objectid(eb,
5438 btrfs_extent_data_ref_offset(eb, dref),
5439 btrfs_extent_data_ref_count(eb, dref),
5442 case BTRFS_SHARED_DATA_REF_KEY:
5443 sref = (struct btrfs_shared_data_ref *)(iref + 1);
5444 add_data_backref(extent_cache, key.objectid, offset,
5446 btrfs_shared_data_ref_count(eb, sref),
5450 fprintf(stderr, "corrupt extent record: key %Lu %u %Lu\n",
5451 key.objectid, key.type, num_bytes);
5454 ptr += btrfs_extent_inline_ref_size(type);
5461 static int check_cache_range(struct btrfs_root *root,
5462 struct btrfs_block_group_cache *cache,
5463 u64 offset, u64 bytes)
5465 struct btrfs_free_space *entry;
5471 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
5472 bytenr = btrfs_sb_offset(i);
5473 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
5474 cache->key.objectid, bytenr, 0,
5475 &logical, &nr, &stripe_len);
5480 if (logical[nr] + stripe_len <= offset)
5482 if (offset + bytes <= logical[nr])
5484 if (logical[nr] == offset) {
5485 if (stripe_len >= bytes) {
5489 bytes -= stripe_len;
5490 offset += stripe_len;
5491 } else if (logical[nr] < offset) {
5492 if (logical[nr] + stripe_len >=
5497 bytes = (offset + bytes) -
5498 (logical[nr] + stripe_len);
5499 offset = logical[nr] + stripe_len;
5502 * Could be tricky, the super may land in the
5503 * middle of the area we're checking. First
5504 * check the easiest case, it's at the end.
5506 if (logical[nr] + stripe_len >=
5508 bytes = logical[nr] - offset;
5512 /* Check the left side */
5513 ret = check_cache_range(root, cache,
5515 logical[nr] - offset);
5521 /* Now we continue with the right side */
5522 bytes = (offset + bytes) -
5523 (logical[nr] + stripe_len);
5524 offset = logical[nr] + stripe_len;
5531 entry = btrfs_find_free_space(cache->free_space_ctl, offset, bytes);
5533 fprintf(stderr, "There is no free space entry for %Lu-%Lu\n",
5534 offset, offset+bytes);
5538 if (entry->offset != offset) {
5539 fprintf(stderr, "Wanted offset %Lu, found %Lu\n", offset,
5544 if (entry->bytes != bytes) {
5545 fprintf(stderr, "Wanted bytes %Lu, found %Lu for off %Lu\n",
5546 bytes, entry->bytes, offset);
5550 unlink_free_space(cache->free_space_ctl, entry);
5555 static int verify_space_cache(struct btrfs_root *root,
5556 struct btrfs_block_group_cache *cache)
5558 struct btrfs_path *path;
5559 struct extent_buffer *leaf;
5560 struct btrfs_key key;
5564 path = btrfs_alloc_path();
5568 root = root->fs_info->extent_root;
5570 last = max_t(u64, cache->key.objectid, BTRFS_SUPER_INFO_OFFSET);
5572 key.objectid = last;
5574 key.type = BTRFS_EXTENT_ITEM_KEY;
5576 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5581 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5582 ret = btrfs_next_leaf(root, path);
5590 leaf = path->nodes[0];
5591 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5592 if (key.objectid >= cache->key.offset + cache->key.objectid)
5594 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
5595 key.type != BTRFS_METADATA_ITEM_KEY) {
5600 if (last == key.objectid) {
5601 if (key.type == BTRFS_EXTENT_ITEM_KEY)
5602 last = key.objectid + key.offset;
5604 last = key.objectid + root->nodesize;
5609 ret = check_cache_range(root, cache, last,
5610 key.objectid - last);
5613 if (key.type == BTRFS_EXTENT_ITEM_KEY)
5614 last = key.objectid + key.offset;
5616 last = key.objectid + root->nodesize;
5620 if (last < cache->key.objectid + cache->key.offset)
5621 ret = check_cache_range(root, cache, last,
5622 cache->key.objectid +
5623 cache->key.offset - last);
5626 btrfs_free_path(path);
5629 !RB_EMPTY_ROOT(&cache->free_space_ctl->free_space_offset)) {
5630 fprintf(stderr, "There are still entries left in the space "
5638 static int check_space_cache(struct btrfs_root *root)
5640 struct btrfs_block_group_cache *cache;
5641 u64 start = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
5645 if (btrfs_super_cache_generation(root->fs_info->super_copy) != -1ULL &&
5646 btrfs_super_generation(root->fs_info->super_copy) !=
5647 btrfs_super_cache_generation(root->fs_info->super_copy)) {
5648 printf("cache and super generation don't match, space cache "
5649 "will be invalidated\n");
5653 if (ctx.progress_enabled) {
5654 ctx.tp = TASK_FREE_SPACE;
5655 task_start(ctx.info);
5659 cache = btrfs_lookup_first_block_group(root->fs_info, start);
5663 start = cache->key.objectid + cache->key.offset;
5664 if (!cache->free_space_ctl) {
5665 if (btrfs_init_free_space_ctl(cache,
5666 root->sectorsize)) {
5671 btrfs_remove_free_space_cache(cache);
5674 if (btrfs_fs_compat_ro(root->fs_info,
5675 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)) {
5676 ret = exclude_super_stripes(root, cache);
5678 fprintf(stderr, "could not exclude super stripes: %s\n",
5683 ret = load_free_space_tree(root->fs_info, cache);
5684 free_excluded_extents(root, cache);
5686 fprintf(stderr, "could not load free space tree: %s\n",
5693 ret = load_free_space_cache(root->fs_info, cache);
5698 ret = verify_space_cache(root, cache);
5700 fprintf(stderr, "cache appears valid but isn't %Lu\n",
5701 cache->key.objectid);
5706 task_stop(ctx.info);
5708 return error ? -EINVAL : 0;
5711 static int check_extent_csums(struct btrfs_root *root, u64 bytenr,
5712 u64 num_bytes, unsigned long leaf_offset,
5713 struct extent_buffer *eb) {
5716 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
5718 unsigned long csum_offset;
5722 u64 data_checked = 0;
5728 if (num_bytes % root->sectorsize)
5731 data = malloc(num_bytes);
5735 while (offset < num_bytes) {
5738 read_len = num_bytes - offset;
5739 /* read as much space once a time */
5740 ret = read_extent_data(root, data + offset,
5741 bytenr + offset, &read_len, mirror);
5745 /* verify every 4k data's checksum */
5746 while (data_checked < read_len) {
5748 tmp = offset + data_checked;
5750 csum = btrfs_csum_data(NULL, (char *)data + tmp,
5751 csum, root->sectorsize);
5752 btrfs_csum_final(csum, (char *)&csum);
5754 csum_offset = leaf_offset +
5755 tmp / root->sectorsize * csum_size;
5756 read_extent_buffer(eb, (char *)&csum_expected,
5757 csum_offset, csum_size);
5758 /* try another mirror */
5759 if (csum != csum_expected) {
5760 fprintf(stderr, "mirror %d bytenr %llu csum %u expected csum %u\n",
5761 mirror, bytenr + tmp,
5762 csum, csum_expected);
5763 num_copies = btrfs_num_copies(
5764 &root->fs_info->mapping_tree,
5766 if (mirror < num_copies - 1) {
5771 data_checked += root->sectorsize;
5780 static int check_extent_exists(struct btrfs_root *root, u64 bytenr,
5783 struct btrfs_path *path;
5784 struct extent_buffer *leaf;
5785 struct btrfs_key key;
5788 path = btrfs_alloc_path();
5790 fprintf(stderr, "Error allocating path\n");
5794 key.objectid = bytenr;
5795 key.type = BTRFS_EXTENT_ITEM_KEY;
5796 key.offset = (u64)-1;
5799 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
5802 fprintf(stderr, "Error looking up extent record %d\n", ret);
5803 btrfs_free_path(path);
5806 if (path->slots[0] > 0) {
5809 ret = btrfs_prev_leaf(root, path);
5812 } else if (ret > 0) {
5819 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5822 * Block group items come before extent items if they have the same
5823 * bytenr, so walk back one more just in case. Dear future traveller,
5824 * first congrats on mastering time travel. Now if it's not too much
5825 * trouble could you go back to 2006 and tell Chris to make the
5826 * BLOCK_GROUP_ITEM_KEY (and BTRFS_*_REF_KEY) lower than the
5827 * EXTENT_ITEM_KEY please?
5829 while (key.type > BTRFS_EXTENT_ITEM_KEY) {
5830 if (path->slots[0] > 0) {
5833 ret = btrfs_prev_leaf(root, path);
5836 } else if (ret > 0) {
5841 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5845 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5846 ret = btrfs_next_leaf(root, path);
5848 fprintf(stderr, "Error going to next leaf "
5850 btrfs_free_path(path);
5856 leaf = path->nodes[0];
5857 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5858 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
5862 if (key.objectid + key.offset < bytenr) {
5866 if (key.objectid > bytenr + num_bytes)
5869 if (key.objectid == bytenr) {
5870 if (key.offset >= num_bytes) {
5874 num_bytes -= key.offset;
5875 bytenr += key.offset;
5876 } else if (key.objectid < bytenr) {
5877 if (key.objectid + key.offset >= bytenr + num_bytes) {
5881 num_bytes = (bytenr + num_bytes) -
5882 (key.objectid + key.offset);
5883 bytenr = key.objectid + key.offset;
5885 if (key.objectid + key.offset < bytenr + num_bytes) {
5886 u64 new_start = key.objectid + key.offset;
5887 u64 new_bytes = bytenr + num_bytes - new_start;
5890 * Weird case, the extent is in the middle of
5891 * our range, we'll have to search one side
5892 * and then the other. Not sure if this happens
5893 * in real life, but no harm in coding it up
5894 * anyway just in case.
5896 btrfs_release_path(path);
5897 ret = check_extent_exists(root, new_start,
5900 fprintf(stderr, "Right section didn't "
5904 num_bytes = key.objectid - bytenr;
5907 num_bytes = key.objectid - bytenr;
5914 if (num_bytes && !ret) {
5915 fprintf(stderr, "There are no extents for csum range "
5916 "%Lu-%Lu\n", bytenr, bytenr+num_bytes);
5920 btrfs_free_path(path);
5924 static int check_csums(struct btrfs_root *root)
5926 struct btrfs_path *path;
5927 struct extent_buffer *leaf;
5928 struct btrfs_key key;
5929 u64 offset = 0, num_bytes = 0;
5930 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
5934 unsigned long leaf_offset;
5936 root = root->fs_info->csum_root;
5937 if (!extent_buffer_uptodate(root->node)) {
5938 fprintf(stderr, "No valid csum tree found\n");
5942 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
5943 key.type = BTRFS_EXTENT_CSUM_KEY;
5946 path = btrfs_alloc_path();
5950 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5952 fprintf(stderr, "Error searching csum tree %d\n", ret);
5953 btrfs_free_path(path);
5957 if (ret > 0 && path->slots[0])
5962 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5963 ret = btrfs_next_leaf(root, path);
5965 fprintf(stderr, "Error going to next leaf "
5972 leaf = path->nodes[0];
5974 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5975 if (key.type != BTRFS_EXTENT_CSUM_KEY) {
5980 data_len = (btrfs_item_size_nr(leaf, path->slots[0]) /
5981 csum_size) * root->sectorsize;
5982 if (!check_data_csum)
5983 goto skip_csum_check;
5984 leaf_offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
5985 ret = check_extent_csums(root, key.offset, data_len,
5991 offset = key.offset;
5992 } else if (key.offset != offset + num_bytes) {
5993 ret = check_extent_exists(root, offset, num_bytes);
5995 fprintf(stderr, "Csum exists for %Lu-%Lu but "
5996 "there is no extent record\n",
5997 offset, offset+num_bytes);
6000 offset = key.offset;
6003 num_bytes += data_len;
6007 btrfs_free_path(path);
6011 static int is_dropped_key(struct btrfs_key *key,
6012 struct btrfs_key *drop_key) {
6013 if (key->objectid < drop_key->objectid)
6015 else if (key->objectid == drop_key->objectid) {
6016 if (key->type < drop_key->type)
6018 else if (key->type == drop_key->type) {
6019 if (key->offset < drop_key->offset)
6027 * Here are the rules for FULL_BACKREF.
6029 * 1) If BTRFS_HEADER_FLAG_RELOC is set then we have FULL_BACKREF set.
6030 * 2) If btrfs_header_owner(buf) no longer points to buf then we have
6032 * 3) We cowed the block walking down a reloc tree. This is impossible to tell
6033 * if it happened after the relocation occurred since we'll have dropped the
6034 * reloc root, so it's entirely possible to have FULL_BACKREF set on buf and
6035 * have no real way to know for sure.
6037 * We process the blocks one root at a time, and we start from the lowest root
6038 * objectid and go to the highest. So we can just lookup the owner backref for
6039 * the record and if we don't find it then we know it doesn't exist and we have
6042 * FIXME: if we ever start reclaiming root objectid's then we need to fix this
6043 * assumption and simply indicate that we _think_ that the FULL BACKREF needs to
6044 * be set or not and then we can check later once we've gathered all the refs.
6046 static int calc_extent_flag(struct btrfs_root *root,
6047 struct cache_tree *extent_cache,
6048 struct extent_buffer *buf,
6049 struct root_item_record *ri,
6052 struct extent_record *rec;
6053 struct cache_extent *cache;
6054 struct tree_backref *tback;
6057 cache = lookup_cache_extent(extent_cache, buf->start, 1);
6058 /* we have added this extent before */
6062 rec = container_of(cache, struct extent_record, cache);
6065 * Except file/reloc tree, we can not have
6068 if (ri->objectid < BTRFS_FIRST_FREE_OBJECTID)
6073 if (buf->start == ri->bytenr)
6076 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
6079 owner = btrfs_header_owner(buf);
6080 if (owner == ri->objectid)
6083 tback = find_tree_backref(rec, 0, owner);
6088 if (rec->flag_block_full_backref != FLAG_UNSET &&
6089 rec->flag_block_full_backref != 0)
6090 rec->bad_full_backref = 1;
6093 *flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6094 if (rec->flag_block_full_backref != FLAG_UNSET &&
6095 rec->flag_block_full_backref != 1)
6096 rec->bad_full_backref = 1;
6100 static void report_mismatch_key_root(u8 key_type, u64 rootid)
6102 fprintf(stderr, "Invalid key type(");
6103 print_key_type(stderr, 0, key_type);
6104 fprintf(stderr, ") found in root(");
6105 print_objectid(stderr, rootid, 0);
6106 fprintf(stderr, ")\n");
6110 * Check if the key is valid with its extent buffer.
6112 * This is a early check in case invalid key exists in a extent buffer
6113 * This is not comprehensive yet, but should prevent wrong key/item passed
6116 static int check_type_with_root(u64 rootid, u8 key_type)
6119 /* Only valid in chunk tree */
6120 case BTRFS_DEV_ITEM_KEY:
6121 case BTRFS_CHUNK_ITEM_KEY:
6122 if (rootid != BTRFS_CHUNK_TREE_OBJECTID)
6125 /* valid in csum and log tree */
6126 case BTRFS_CSUM_TREE_OBJECTID:
6127 if (!(rootid == BTRFS_TREE_LOG_OBJECTID ||
6131 case BTRFS_EXTENT_ITEM_KEY:
6132 case BTRFS_METADATA_ITEM_KEY:
6133 case BTRFS_BLOCK_GROUP_ITEM_KEY:
6134 if (rootid != BTRFS_EXTENT_TREE_OBJECTID)
6137 case BTRFS_ROOT_ITEM_KEY:
6138 if (rootid != BTRFS_ROOT_TREE_OBJECTID)
6141 case BTRFS_DEV_EXTENT_KEY:
6142 if (rootid != BTRFS_DEV_TREE_OBJECTID)
6148 report_mismatch_key_root(key_type, rootid);
6152 static int run_next_block(struct btrfs_root *root,
6153 struct block_info *bits,
6156 struct cache_tree *pending,
6157 struct cache_tree *seen,
6158 struct cache_tree *reada,
6159 struct cache_tree *nodes,
6160 struct cache_tree *extent_cache,
6161 struct cache_tree *chunk_cache,
6162 struct rb_root *dev_cache,
6163 struct block_group_tree *block_group_cache,
6164 struct device_extent_tree *dev_extent_cache,
6165 struct root_item_record *ri)
6167 struct extent_buffer *buf;
6168 struct extent_record *rec = NULL;
6179 struct btrfs_key key;
6180 struct cache_extent *cache;
6183 nritems = pick_next_pending(pending, reada, nodes, *last, bits,
6184 bits_nr, &reada_bits);
6189 for(i = 0; i < nritems; i++) {
6190 ret = add_cache_extent(reada, bits[i].start,
6195 /* fixme, get the parent transid */
6196 readahead_tree_block(root, bits[i].start,
6200 *last = bits[0].start;
6201 bytenr = bits[0].start;
6202 size = bits[0].size;
6204 cache = lookup_cache_extent(pending, bytenr, size);
6206 remove_cache_extent(pending, cache);
6209 cache = lookup_cache_extent(reada, bytenr, size);
6211 remove_cache_extent(reada, cache);
6214 cache = lookup_cache_extent(nodes, bytenr, size);
6216 remove_cache_extent(nodes, cache);
6219 cache = lookup_cache_extent(extent_cache, bytenr, size);
6221 rec = container_of(cache, struct extent_record, cache);
6222 gen = rec->parent_generation;
6225 /* fixme, get the real parent transid */
6226 buf = read_tree_block(root, bytenr, size, gen);
6227 if (!extent_buffer_uptodate(buf)) {
6228 record_bad_block_io(root->fs_info,
6229 extent_cache, bytenr, size);
6233 nritems = btrfs_header_nritems(buf);
6236 if (!init_extent_tree) {
6237 ret = btrfs_lookup_extent_info(NULL, root, bytenr,
6238 btrfs_header_level(buf), 1, NULL,
6241 ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
6243 fprintf(stderr, "Couldn't calc extent flags\n");
6244 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6249 ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
6251 fprintf(stderr, "Couldn't calc extent flags\n");
6252 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6256 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6258 ri->objectid != BTRFS_TREE_RELOC_OBJECTID &&
6259 ri->objectid == btrfs_header_owner(buf)) {
6261 * Ok we got to this block from it's original owner and
6262 * we have FULL_BACKREF set. Relocation can leave
6263 * converted blocks over so this is altogether possible,
6264 * however it's not possible if the generation > the
6265 * last snapshot, so check for this case.
6267 if (!btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC) &&
6268 btrfs_header_generation(buf) > ri->last_snapshot) {
6269 flags &= ~BTRFS_BLOCK_FLAG_FULL_BACKREF;
6270 rec->bad_full_backref = 1;
6275 (ri->objectid == BTRFS_TREE_RELOC_OBJECTID ||
6276 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
6277 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6278 rec->bad_full_backref = 1;
6282 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6283 rec->flag_block_full_backref = 1;
6287 rec->flag_block_full_backref = 0;
6289 owner = btrfs_header_owner(buf);
6292 ret = check_block(root, extent_cache, buf, flags);
6296 if (btrfs_is_leaf(buf)) {
6297 btree_space_waste += btrfs_leaf_free_space(root, buf);
6298 for (i = 0; i < nritems; i++) {
6299 struct btrfs_file_extent_item *fi;
6300 btrfs_item_key_to_cpu(buf, &key, i);
6302 * Check key type against the leaf owner.
6303 * Could filter quite a lot of early error if
6306 if (check_type_with_root(btrfs_header_owner(buf),
6308 fprintf(stderr, "ignoring invalid key\n");
6311 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
6312 process_extent_item(root, extent_cache, buf,
6316 if (key.type == BTRFS_METADATA_ITEM_KEY) {
6317 process_extent_item(root, extent_cache, buf,
6321 if (key.type == BTRFS_EXTENT_CSUM_KEY) {
6323 btrfs_item_size_nr(buf, i);
6326 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6327 process_chunk_item(chunk_cache, &key, buf, i);
6330 if (key.type == BTRFS_DEV_ITEM_KEY) {
6331 process_device_item(dev_cache, &key, buf, i);
6334 if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6335 process_block_group_item(block_group_cache,
6339 if (key.type == BTRFS_DEV_EXTENT_KEY) {
6340 process_device_extent_item(dev_extent_cache,
6345 if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
6346 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6347 process_extent_ref_v0(extent_cache, buf, i);
6354 if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
6355 ret = add_tree_backref(extent_cache,
6356 key.objectid, 0, key.offset, 0);
6358 error("add_tree_backref failed: %s",
6362 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
6363 ret = add_tree_backref(extent_cache,
6364 key.objectid, key.offset, 0, 0);
6366 error("add_tree_backref failed: %s",
6370 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
6371 struct btrfs_extent_data_ref *ref;
6372 ref = btrfs_item_ptr(buf, i,
6373 struct btrfs_extent_data_ref);
6374 add_data_backref(extent_cache,
6376 btrfs_extent_data_ref_root(buf, ref),
6377 btrfs_extent_data_ref_objectid(buf,
6379 btrfs_extent_data_ref_offset(buf, ref),
6380 btrfs_extent_data_ref_count(buf, ref),
6381 0, root->sectorsize);
6384 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
6385 struct btrfs_shared_data_ref *ref;
6386 ref = btrfs_item_ptr(buf, i,
6387 struct btrfs_shared_data_ref);
6388 add_data_backref(extent_cache,
6389 key.objectid, key.offset, 0, 0, 0,
6390 btrfs_shared_data_ref_count(buf, ref),
6391 0, root->sectorsize);
6394 if (key.type == BTRFS_ORPHAN_ITEM_KEY) {
6395 struct bad_item *bad;
6397 if (key.objectid == BTRFS_ORPHAN_OBJECTID)
6401 bad = malloc(sizeof(struct bad_item));
6404 INIT_LIST_HEAD(&bad->list);
6405 memcpy(&bad->key, &key,
6406 sizeof(struct btrfs_key));
6407 bad->root_id = owner;
6408 list_add_tail(&bad->list, &delete_items);
6411 if (key.type != BTRFS_EXTENT_DATA_KEY)
6413 fi = btrfs_item_ptr(buf, i,
6414 struct btrfs_file_extent_item);
6415 if (btrfs_file_extent_type(buf, fi) ==
6416 BTRFS_FILE_EXTENT_INLINE)
6418 if (btrfs_file_extent_disk_bytenr(buf, fi) == 0)
6421 data_bytes_allocated +=
6422 btrfs_file_extent_disk_num_bytes(buf, fi);
6423 if (data_bytes_allocated < root->sectorsize) {
6426 data_bytes_referenced +=
6427 btrfs_file_extent_num_bytes(buf, fi);
6428 add_data_backref(extent_cache,
6429 btrfs_file_extent_disk_bytenr(buf, fi),
6430 parent, owner, key.objectid, key.offset -
6431 btrfs_file_extent_offset(buf, fi), 1, 1,
6432 btrfs_file_extent_disk_num_bytes(buf, fi));
6436 struct btrfs_key first_key;
6438 first_key.objectid = 0;
6441 btrfs_item_key_to_cpu(buf, &first_key, 0);
6442 level = btrfs_header_level(buf);
6443 for (i = 0; i < nritems; i++) {
6444 struct extent_record tmpl;
6446 ptr = btrfs_node_blockptr(buf, i);
6447 size = root->nodesize;
6448 btrfs_node_key_to_cpu(buf, &key, i);
6450 if ((level == ri->drop_level)
6451 && is_dropped_key(&key, &ri->drop_key)) {
6456 memset(&tmpl, 0, sizeof(tmpl));
6457 btrfs_cpu_key_to_disk(&tmpl.parent_key, &key);
6458 tmpl.parent_generation = btrfs_node_ptr_generation(buf, i);
6463 tmpl.max_size = size;
6464 ret = add_extent_rec(extent_cache, &tmpl);
6468 ret = add_tree_backref(extent_cache, ptr, parent,
6471 error("add_tree_backref failed: %s",
6477 add_pending(nodes, seen, ptr, size);
6479 add_pending(pending, seen, ptr, size);
6482 btree_space_waste += (BTRFS_NODEPTRS_PER_BLOCK(root) -
6483 nritems) * sizeof(struct btrfs_key_ptr);
6485 total_btree_bytes += buf->len;
6486 if (fs_root_objectid(btrfs_header_owner(buf)))
6487 total_fs_tree_bytes += buf->len;
6488 if (btrfs_header_owner(buf) == BTRFS_EXTENT_TREE_OBJECTID)
6489 total_extent_tree_bytes += buf->len;
6490 if (!found_old_backref &&
6491 btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID &&
6492 btrfs_header_backref_rev(buf) == BTRFS_MIXED_BACKREF_REV &&
6493 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
6494 found_old_backref = 1;
6496 free_extent_buffer(buf);
6500 static int add_root_to_pending(struct extent_buffer *buf,
6501 struct cache_tree *extent_cache,
6502 struct cache_tree *pending,
6503 struct cache_tree *seen,
6504 struct cache_tree *nodes,
6507 struct extent_record tmpl;
6510 if (btrfs_header_level(buf) > 0)
6511 add_pending(nodes, seen, buf->start, buf->len);
6513 add_pending(pending, seen, buf->start, buf->len);
6515 memset(&tmpl, 0, sizeof(tmpl));
6516 tmpl.start = buf->start;
6521 tmpl.max_size = buf->len;
6522 add_extent_rec(extent_cache, &tmpl);
6524 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
6525 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
6526 ret = add_tree_backref(extent_cache, buf->start, buf->start,
6529 ret = add_tree_backref(extent_cache, buf->start, 0, objectid,
6534 /* as we fix the tree, we might be deleting blocks that
6535 * we're tracking for repair. This hook makes sure we
6536 * remove any backrefs for blocks as we are fixing them.
6538 static int free_extent_hook(struct btrfs_trans_handle *trans,
6539 struct btrfs_root *root,
6540 u64 bytenr, u64 num_bytes, u64 parent,
6541 u64 root_objectid, u64 owner, u64 offset,
6544 struct extent_record *rec;
6545 struct cache_extent *cache;
6547 struct cache_tree *extent_cache = root->fs_info->fsck_extent_cache;
6549 is_data = owner >= BTRFS_FIRST_FREE_OBJECTID;
6550 cache = lookup_cache_extent(extent_cache, bytenr, num_bytes);
6554 rec = container_of(cache, struct extent_record, cache);
6556 struct data_backref *back;
6557 back = find_data_backref(rec, parent, root_objectid, owner,
6558 offset, 1, bytenr, num_bytes);
6561 if (back->node.found_ref) {
6562 back->found_ref -= refs_to_drop;
6564 rec->refs -= refs_to_drop;
6566 if (back->node.found_extent_tree) {
6567 back->num_refs -= refs_to_drop;
6568 if (rec->extent_item_refs)
6569 rec->extent_item_refs -= refs_to_drop;
6571 if (back->found_ref == 0)
6572 back->node.found_ref = 0;
6573 if (back->num_refs == 0)
6574 back->node.found_extent_tree = 0;
6576 if (!back->node.found_extent_tree && back->node.found_ref) {
6577 list_del(&back->node.list);
6581 struct tree_backref *back;
6582 back = find_tree_backref(rec, parent, root_objectid);
6585 if (back->node.found_ref) {
6588 back->node.found_ref = 0;
6590 if (back->node.found_extent_tree) {
6591 if (rec->extent_item_refs)
6592 rec->extent_item_refs--;
6593 back->node.found_extent_tree = 0;
6595 if (!back->node.found_extent_tree && back->node.found_ref) {
6596 list_del(&back->node.list);
6600 maybe_free_extent_rec(extent_cache, rec);
6605 static int delete_extent_records(struct btrfs_trans_handle *trans,
6606 struct btrfs_root *root,
6607 struct btrfs_path *path,
6608 u64 bytenr, u64 new_len)
6610 struct btrfs_key key;
6611 struct btrfs_key found_key;
6612 struct extent_buffer *leaf;
6617 key.objectid = bytenr;
6619 key.offset = (u64)-1;
6622 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
6629 if (path->slots[0] == 0)
6635 leaf = path->nodes[0];
6636 slot = path->slots[0];
6638 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6639 if (found_key.objectid != bytenr)
6642 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
6643 found_key.type != BTRFS_METADATA_ITEM_KEY &&
6644 found_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
6645 found_key.type != BTRFS_EXTENT_DATA_REF_KEY &&
6646 found_key.type != BTRFS_EXTENT_REF_V0_KEY &&
6647 found_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
6648 found_key.type != BTRFS_SHARED_DATA_REF_KEY) {
6649 btrfs_release_path(path);
6650 if (found_key.type == 0) {
6651 if (found_key.offset == 0)
6653 key.offset = found_key.offset - 1;
6654 key.type = found_key.type;
6656 key.type = found_key.type - 1;
6657 key.offset = (u64)-1;
6661 fprintf(stderr, "repair deleting extent record: key %Lu %u %Lu\n",
6662 found_key.objectid, found_key.type, found_key.offset);
6664 ret = btrfs_del_item(trans, root->fs_info->extent_root, path);
6667 btrfs_release_path(path);
6669 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
6670 found_key.type == BTRFS_METADATA_ITEM_KEY) {
6671 u64 bytes = (found_key.type == BTRFS_EXTENT_ITEM_KEY) ?
6672 found_key.offset : root->nodesize;
6674 ret = btrfs_update_block_group(trans, root, bytenr,
6681 btrfs_release_path(path);
6686 * for a single backref, this will allocate a new extent
6687 * and add the backref to it.
6689 static int record_extent(struct btrfs_trans_handle *trans,
6690 struct btrfs_fs_info *info,
6691 struct btrfs_path *path,
6692 struct extent_record *rec,
6693 struct extent_backref *back,
6694 int allocated, u64 flags)
6697 struct btrfs_root *extent_root = info->extent_root;
6698 struct extent_buffer *leaf;
6699 struct btrfs_key ins_key;
6700 struct btrfs_extent_item *ei;
6701 struct tree_backref *tback;
6702 struct data_backref *dback;
6703 struct btrfs_tree_block_info *bi;
6706 rec->max_size = max_t(u64, rec->max_size,
6707 info->extent_root->nodesize);
6710 u32 item_size = sizeof(*ei);
6713 item_size += sizeof(*bi);
6715 ins_key.objectid = rec->start;
6716 ins_key.offset = rec->max_size;
6717 ins_key.type = BTRFS_EXTENT_ITEM_KEY;
6719 ret = btrfs_insert_empty_item(trans, extent_root, path,
6720 &ins_key, item_size);
6724 leaf = path->nodes[0];
6725 ei = btrfs_item_ptr(leaf, path->slots[0],
6726 struct btrfs_extent_item);
6728 btrfs_set_extent_refs(leaf, ei, 0);
6729 btrfs_set_extent_generation(leaf, ei, rec->generation);
6731 if (back->is_data) {
6732 btrfs_set_extent_flags(leaf, ei,
6733 BTRFS_EXTENT_FLAG_DATA);
6735 struct btrfs_disk_key copy_key;;
6737 tback = to_tree_backref(back);
6738 bi = (struct btrfs_tree_block_info *)(ei + 1);
6739 memset_extent_buffer(leaf, 0, (unsigned long)bi,
6742 btrfs_set_disk_key_objectid(©_key,
6743 rec->info_objectid);
6744 btrfs_set_disk_key_type(©_key, 0);
6745 btrfs_set_disk_key_offset(©_key, 0);
6747 btrfs_set_tree_block_level(leaf, bi, rec->info_level);
6748 btrfs_set_tree_block_key(leaf, bi, ©_key);
6750 btrfs_set_extent_flags(leaf, ei,
6751 BTRFS_EXTENT_FLAG_TREE_BLOCK | flags);
6754 btrfs_mark_buffer_dirty(leaf);
6755 ret = btrfs_update_block_group(trans, extent_root, rec->start,
6756 rec->max_size, 1, 0);
6759 btrfs_release_path(path);
6762 if (back->is_data) {
6766 dback = to_data_backref(back);
6767 if (back->full_backref)
6768 parent = dback->parent;
6772 for (i = 0; i < dback->found_ref; i++) {
6773 /* if parent != 0, we're doing a full backref
6774 * passing BTRFS_FIRST_FREE_OBJECTID as the owner
6775 * just makes the backref allocator create a data
6778 ret = btrfs_inc_extent_ref(trans, info->extent_root,
6779 rec->start, rec->max_size,
6783 BTRFS_FIRST_FREE_OBJECTID :
6789 fprintf(stderr, "adding new data backref"
6790 " on %llu %s %llu owner %llu"
6791 " offset %llu found %d\n",
6792 (unsigned long long)rec->start,
6793 back->full_backref ?
6795 back->full_backref ?
6796 (unsigned long long)parent :
6797 (unsigned long long)dback->root,
6798 (unsigned long long)dback->owner,
6799 (unsigned long long)dback->offset,
6804 tback = to_tree_backref(back);
6805 if (back->full_backref)
6806 parent = tback->parent;
6810 ret = btrfs_inc_extent_ref(trans, info->extent_root,
6811 rec->start, rec->max_size,
6812 parent, tback->root, 0, 0);
6813 fprintf(stderr, "adding new tree backref on "
6814 "start %llu len %llu parent %llu root %llu\n",
6815 rec->start, rec->max_size, parent, tback->root);
6818 btrfs_release_path(path);
6822 static struct extent_entry *find_entry(struct list_head *entries,
6823 u64 bytenr, u64 bytes)
6825 struct extent_entry *entry = NULL;
6827 list_for_each_entry(entry, entries, list) {
6828 if (entry->bytenr == bytenr && entry->bytes == bytes)
6835 static struct extent_entry *find_most_right_entry(struct list_head *entries)
6837 struct extent_entry *entry, *best = NULL, *prev = NULL;
6839 list_for_each_entry(entry, entries, list) {
6846 * If there are as many broken entries as entries then we know
6847 * not to trust this particular entry.
6849 if (entry->broken == entry->count)
6853 * If our current entry == best then we can't be sure our best
6854 * is really the best, so we need to keep searching.
6856 if (best && best->count == entry->count) {
6862 /* Prev == entry, not good enough, have to keep searching */
6863 if (!prev->broken && prev->count == entry->count)
6867 best = (prev->count > entry->count) ? prev : entry;
6868 else if (best->count < entry->count)
6876 static int repair_ref(struct btrfs_fs_info *info, struct btrfs_path *path,
6877 struct data_backref *dback, struct extent_entry *entry)
6879 struct btrfs_trans_handle *trans;
6880 struct btrfs_root *root;
6881 struct btrfs_file_extent_item *fi;
6882 struct extent_buffer *leaf;
6883 struct btrfs_key key;
6887 key.objectid = dback->root;
6888 key.type = BTRFS_ROOT_ITEM_KEY;
6889 key.offset = (u64)-1;
6890 root = btrfs_read_fs_root(info, &key);
6892 fprintf(stderr, "Couldn't find root for our ref\n");
6897 * The backref points to the original offset of the extent if it was
6898 * split, so we need to search down to the offset we have and then walk
6899 * forward until we find the backref we're looking for.
6901 key.objectid = dback->owner;
6902 key.type = BTRFS_EXTENT_DATA_KEY;
6903 key.offset = dback->offset;
6904 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6906 fprintf(stderr, "Error looking up ref %d\n", ret);
6911 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
6912 ret = btrfs_next_leaf(root, path);
6914 fprintf(stderr, "Couldn't find our ref, next\n");
6918 leaf = path->nodes[0];
6919 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6920 if (key.objectid != dback->owner ||
6921 key.type != BTRFS_EXTENT_DATA_KEY) {
6922 fprintf(stderr, "Couldn't find our ref, search\n");
6925 fi = btrfs_item_ptr(leaf, path->slots[0],
6926 struct btrfs_file_extent_item);
6927 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6928 bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6930 if (bytenr == dback->disk_bytenr && bytes == dback->bytes)
6935 btrfs_release_path(path);
6937 trans = btrfs_start_transaction(root, 1);
6939 return PTR_ERR(trans);
6942 * Ok we have the key of the file extent we want to fix, now we can cow
6943 * down to the thing and fix it.
6945 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6947 fprintf(stderr, "Error cowing down to ref [%Lu, %u, %Lu]: %d\n",
6948 key.objectid, key.type, key.offset, ret);
6952 fprintf(stderr, "Well that's odd, we just found this key "
6953 "[%Lu, %u, %Lu]\n", key.objectid, key.type,
6958 leaf = path->nodes[0];
6959 fi = btrfs_item_ptr(leaf, path->slots[0],
6960 struct btrfs_file_extent_item);
6962 if (btrfs_file_extent_compression(leaf, fi) &&
6963 dback->disk_bytenr != entry->bytenr) {
6964 fprintf(stderr, "Ref doesn't match the record start and is "
6965 "compressed, please take a btrfs-image of this file "
6966 "system and send it to a btrfs developer so they can "
6967 "complete this functionality for bytenr %Lu\n",
6968 dback->disk_bytenr);
6973 if (dback->node.broken && dback->disk_bytenr != entry->bytenr) {
6974 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6975 } else if (dback->disk_bytenr > entry->bytenr) {
6976 u64 off_diff, offset;
6978 off_diff = dback->disk_bytenr - entry->bytenr;
6979 offset = btrfs_file_extent_offset(leaf, fi);
6980 if (dback->disk_bytenr + offset +
6981 btrfs_file_extent_num_bytes(leaf, fi) >
6982 entry->bytenr + entry->bytes) {
6983 fprintf(stderr, "Ref is past the entry end, please "
6984 "take a btrfs-image of this file system and "
6985 "send it to a btrfs developer, ref %Lu\n",
6986 dback->disk_bytenr);
6991 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6992 btrfs_set_file_extent_offset(leaf, fi, offset);
6993 } else if (dback->disk_bytenr < entry->bytenr) {
6996 offset = btrfs_file_extent_offset(leaf, fi);
6997 if (dback->disk_bytenr + offset < entry->bytenr) {
6998 fprintf(stderr, "Ref is before the entry start, please"
6999 " take a btrfs-image of this file system and "
7000 "send it to a btrfs developer, ref %Lu\n",
7001 dback->disk_bytenr);
7006 offset += dback->disk_bytenr;
7007 offset -= entry->bytenr;
7008 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
7009 btrfs_set_file_extent_offset(leaf, fi, offset);
7012 btrfs_set_file_extent_disk_num_bytes(leaf, fi, entry->bytes);
7015 * Chances are if disk_num_bytes were wrong then so is ram_bytes, but
7016 * only do this if we aren't using compression, otherwise it's a
7019 if (!btrfs_file_extent_compression(leaf, fi))
7020 btrfs_set_file_extent_ram_bytes(leaf, fi, entry->bytes);
7022 printf("ram bytes may be wrong?\n");
7023 btrfs_mark_buffer_dirty(leaf);
7025 err = btrfs_commit_transaction(trans, root);
7026 btrfs_release_path(path);
7027 return ret ? ret : err;
7030 static int verify_backrefs(struct btrfs_fs_info *info, struct btrfs_path *path,
7031 struct extent_record *rec)
7033 struct extent_backref *back;
7034 struct data_backref *dback;
7035 struct extent_entry *entry, *best = NULL;
7038 int broken_entries = 0;
7043 * Metadata is easy and the backrefs should always agree on bytenr and
7044 * size, if not we've got bigger issues.
7049 list_for_each_entry(back, &rec->backrefs, list) {
7050 if (back->full_backref || !back->is_data)
7053 dback = to_data_backref(back);
7056 * We only pay attention to backrefs that we found a real
7059 if (dback->found_ref == 0)
7063 * For now we only catch when the bytes don't match, not the
7064 * bytenr. We can easily do this at the same time, but I want
7065 * to have a fs image to test on before we just add repair
7066 * functionality willy-nilly so we know we won't screw up the
7070 entry = find_entry(&entries, dback->disk_bytenr,
7073 entry = malloc(sizeof(struct extent_entry));
7078 memset(entry, 0, sizeof(*entry));
7079 entry->bytenr = dback->disk_bytenr;
7080 entry->bytes = dback->bytes;
7081 list_add_tail(&entry->list, &entries);
7086 * If we only have on entry we may think the entries agree when
7087 * in reality they don't so we have to do some extra checking.
7089 if (dback->disk_bytenr != rec->start ||
7090 dback->bytes != rec->nr || back->broken)
7101 /* Yay all the backrefs agree, carry on good sir */
7102 if (nr_entries <= 1 && !mismatch)
7105 fprintf(stderr, "attempting to repair backref discrepency for bytenr "
7106 "%Lu\n", rec->start);
7109 * First we want to see if the backrefs can agree amongst themselves who
7110 * is right, so figure out which one of the entries has the highest
7113 best = find_most_right_entry(&entries);
7116 * Ok so we may have an even split between what the backrefs think, so
7117 * this is where we use the extent ref to see what it thinks.
7120 entry = find_entry(&entries, rec->start, rec->nr);
7121 if (!entry && (!broken_entries || !rec->found_rec)) {
7122 fprintf(stderr, "Backrefs don't agree with each other "
7123 "and extent record doesn't agree with anybody,"
7124 " so we can't fix bytenr %Lu bytes %Lu\n",
7125 rec->start, rec->nr);
7128 } else if (!entry) {
7130 * Ok our backrefs were broken, we'll assume this is the
7131 * correct value and add an entry for this range.
7133 entry = malloc(sizeof(struct extent_entry));
7138 memset(entry, 0, sizeof(*entry));
7139 entry->bytenr = rec->start;
7140 entry->bytes = rec->nr;
7141 list_add_tail(&entry->list, &entries);
7145 best = find_most_right_entry(&entries);
7147 fprintf(stderr, "Backrefs and extent record evenly "
7148 "split on who is right, this is going to "
7149 "require user input to fix bytenr %Lu bytes "
7150 "%Lu\n", rec->start, rec->nr);
7157 * I don't think this can happen currently as we'll abort() if we catch
7158 * this case higher up, but in case somebody removes that we still can't
7159 * deal with it properly here yet, so just bail out of that's the case.
7161 if (best->bytenr != rec->start) {
7162 fprintf(stderr, "Extent start and backref starts don't match, "
7163 "please use btrfs-image on this file system and send "
7164 "it to a btrfs developer so they can make fsck fix "
7165 "this particular case. bytenr is %Lu, bytes is %Lu\n",
7166 rec->start, rec->nr);
7172 * Ok great we all agreed on an extent record, let's go find the real
7173 * references and fix up the ones that don't match.
7175 list_for_each_entry(back, &rec->backrefs, list) {
7176 if (back->full_backref || !back->is_data)
7179 dback = to_data_backref(back);
7182 * Still ignoring backrefs that don't have a real ref attached
7185 if (dback->found_ref == 0)
7188 if (dback->bytes == best->bytes &&
7189 dback->disk_bytenr == best->bytenr)
7192 ret = repair_ref(info, path, dback, best);
7198 * Ok we messed with the actual refs, which means we need to drop our
7199 * entire cache and go back and rescan. I know this is a huge pain and
7200 * adds a lot of extra work, but it's the only way to be safe. Once all
7201 * the backrefs agree we may not need to do anything to the extent
7206 while (!list_empty(&entries)) {
7207 entry = list_entry(entries.next, struct extent_entry, list);
7208 list_del_init(&entry->list);
7214 static int process_duplicates(struct btrfs_root *root,
7215 struct cache_tree *extent_cache,
7216 struct extent_record *rec)
7218 struct extent_record *good, *tmp;
7219 struct cache_extent *cache;
7223 * If we found a extent record for this extent then return, or if we
7224 * have more than one duplicate we are likely going to need to delete
7227 if (rec->found_rec || rec->num_duplicates > 1)
7230 /* Shouldn't happen but just in case */
7231 BUG_ON(!rec->num_duplicates);
7234 * So this happens if we end up with a backref that doesn't match the
7235 * actual extent entry. So either the backref is bad or the extent
7236 * entry is bad. Either way we want to have the extent_record actually
7237 * reflect what we found in the extent_tree, so we need to take the
7238 * duplicate out and use that as the extent_record since the only way we
7239 * get a duplicate is if we find a real life BTRFS_EXTENT_ITEM_KEY.
7241 remove_cache_extent(extent_cache, &rec->cache);
7243 good = to_extent_record(rec->dups.next);
7244 list_del_init(&good->list);
7245 INIT_LIST_HEAD(&good->backrefs);
7246 INIT_LIST_HEAD(&good->dups);
7247 good->cache.start = good->start;
7248 good->cache.size = good->nr;
7249 good->content_checked = 0;
7250 good->owner_ref_checked = 0;
7251 good->num_duplicates = 0;
7252 good->refs = rec->refs;
7253 list_splice_init(&rec->backrefs, &good->backrefs);
7255 cache = lookup_cache_extent(extent_cache, good->start,
7259 tmp = container_of(cache, struct extent_record, cache);
7262 * If we find another overlapping extent and it's found_rec is
7263 * set then it's a duplicate and we need to try and delete
7266 if (tmp->found_rec || tmp->num_duplicates > 0) {
7267 if (list_empty(&good->list))
7268 list_add_tail(&good->list,
7269 &duplicate_extents);
7270 good->num_duplicates += tmp->num_duplicates + 1;
7271 list_splice_init(&tmp->dups, &good->dups);
7272 list_del_init(&tmp->list);
7273 list_add_tail(&tmp->list, &good->dups);
7274 remove_cache_extent(extent_cache, &tmp->cache);
7279 * Ok we have another non extent item backed extent rec, so lets
7280 * just add it to this extent and carry on like we did above.
7282 good->refs += tmp->refs;
7283 list_splice_init(&tmp->backrefs, &good->backrefs);
7284 remove_cache_extent(extent_cache, &tmp->cache);
7287 ret = insert_cache_extent(extent_cache, &good->cache);
7290 return good->num_duplicates ? 0 : 1;
7293 static int delete_duplicate_records(struct btrfs_root *root,
7294 struct extent_record *rec)
7296 struct btrfs_trans_handle *trans;
7297 LIST_HEAD(delete_list);
7298 struct btrfs_path *path;
7299 struct extent_record *tmp, *good, *n;
7302 struct btrfs_key key;
7304 path = btrfs_alloc_path();
7311 /* Find the record that covers all of the duplicates. */
7312 list_for_each_entry(tmp, &rec->dups, list) {
7313 if (good->start < tmp->start)
7315 if (good->nr > tmp->nr)
7318 if (tmp->start + tmp->nr < good->start + good->nr) {
7319 fprintf(stderr, "Ok we have overlapping extents that "
7320 "aren't completely covered by each other, this "
7321 "is going to require more careful thought. "
7322 "The extents are [%Lu-%Lu] and [%Lu-%Lu]\n",
7323 tmp->start, tmp->nr, good->start, good->nr);
7330 list_add_tail(&rec->list, &delete_list);
7332 list_for_each_entry_safe(tmp, n, &rec->dups, list) {
7335 list_move_tail(&tmp->list, &delete_list);
7338 root = root->fs_info->extent_root;
7339 trans = btrfs_start_transaction(root, 1);
7340 if (IS_ERR(trans)) {
7341 ret = PTR_ERR(trans);
7345 list_for_each_entry(tmp, &delete_list, list) {
7346 if (tmp->found_rec == 0)
7348 key.objectid = tmp->start;
7349 key.type = BTRFS_EXTENT_ITEM_KEY;
7350 key.offset = tmp->nr;
7352 /* Shouldn't happen but just in case */
7353 if (tmp->metadata) {
7354 fprintf(stderr, "Well this shouldn't happen, extent "
7355 "record overlaps but is metadata? "
7356 "[%Lu, %Lu]\n", tmp->start, tmp->nr);
7360 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7366 ret = btrfs_del_item(trans, root, path);
7369 btrfs_release_path(path);
7372 err = btrfs_commit_transaction(trans, root);
7376 while (!list_empty(&delete_list)) {
7377 tmp = to_extent_record(delete_list.next);
7378 list_del_init(&tmp->list);
7384 while (!list_empty(&rec->dups)) {
7385 tmp = to_extent_record(rec->dups.next);
7386 list_del_init(&tmp->list);
7390 btrfs_free_path(path);
7392 if (!ret && !nr_del)
7393 rec->num_duplicates = 0;
7395 return ret ? ret : nr_del;
7398 static int find_possible_backrefs(struct btrfs_fs_info *info,
7399 struct btrfs_path *path,
7400 struct cache_tree *extent_cache,
7401 struct extent_record *rec)
7403 struct btrfs_root *root;
7404 struct extent_backref *back;
7405 struct data_backref *dback;
7406 struct cache_extent *cache;
7407 struct btrfs_file_extent_item *fi;
7408 struct btrfs_key key;
7412 list_for_each_entry(back, &rec->backrefs, list) {
7413 /* Don't care about full backrefs (poor unloved backrefs) */
7414 if (back->full_backref || !back->is_data)
7417 dback = to_data_backref(back);
7419 /* We found this one, we don't need to do a lookup */
7420 if (dback->found_ref)
7423 key.objectid = dback->root;
7424 key.type = BTRFS_ROOT_ITEM_KEY;
7425 key.offset = (u64)-1;
7427 root = btrfs_read_fs_root(info, &key);
7429 /* No root, definitely a bad ref, skip */
7430 if (IS_ERR(root) && PTR_ERR(root) == -ENOENT)
7432 /* Other err, exit */
7434 return PTR_ERR(root);
7436 key.objectid = dback->owner;
7437 key.type = BTRFS_EXTENT_DATA_KEY;
7438 key.offset = dback->offset;
7439 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7441 btrfs_release_path(path);
7444 /* Didn't find it, we can carry on */
7449 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
7450 struct btrfs_file_extent_item);
7451 bytenr = btrfs_file_extent_disk_bytenr(path->nodes[0], fi);
7452 bytes = btrfs_file_extent_disk_num_bytes(path->nodes[0], fi);
7453 btrfs_release_path(path);
7454 cache = lookup_cache_extent(extent_cache, bytenr, 1);
7456 struct extent_record *tmp;
7457 tmp = container_of(cache, struct extent_record, cache);
7460 * If we found an extent record for the bytenr for this
7461 * particular backref then we can't add it to our
7462 * current extent record. We only want to add backrefs
7463 * that don't have a corresponding extent item in the
7464 * extent tree since they likely belong to this record
7465 * and we need to fix it if it doesn't match bytenrs.
7471 dback->found_ref += 1;
7472 dback->disk_bytenr = bytenr;
7473 dback->bytes = bytes;
7476 * Set this so the verify backref code knows not to trust the
7477 * values in this backref.
7486 * Record orphan data ref into corresponding root.
7488 * Return 0 if the extent item contains data ref and recorded.
7489 * Return 1 if the extent item contains no useful data ref
7490 * On that case, it may contains only shared_dataref or metadata backref
7491 * or the file extent exists(this should be handled by the extent bytenr
7493 * Return <0 if something goes wrong.
7495 static int record_orphan_data_extents(struct btrfs_fs_info *fs_info,
7496 struct extent_record *rec)
7498 struct btrfs_key key;
7499 struct btrfs_root *dest_root;
7500 struct extent_backref *back;
7501 struct data_backref *dback;
7502 struct orphan_data_extent *orphan;
7503 struct btrfs_path *path;
7504 int recorded_data_ref = 0;
7509 path = btrfs_alloc_path();
7512 list_for_each_entry(back, &rec->backrefs, list) {
7513 if (back->full_backref || !back->is_data ||
7514 !back->found_extent_tree)
7516 dback = to_data_backref(back);
7517 if (dback->found_ref)
7519 key.objectid = dback->root;
7520 key.type = BTRFS_ROOT_ITEM_KEY;
7521 key.offset = (u64)-1;
7523 dest_root = btrfs_read_fs_root(fs_info, &key);
7525 /* For non-exist root we just skip it */
7526 if (IS_ERR(dest_root) || !dest_root)
7529 key.objectid = dback->owner;
7530 key.type = BTRFS_EXTENT_DATA_KEY;
7531 key.offset = dback->offset;
7533 ret = btrfs_search_slot(NULL, dest_root, &key, path, 0, 0);
7535 * For ret < 0, it's OK since the fs-tree may be corrupted,
7536 * we need to record it for inode/file extent rebuild.
7537 * For ret > 0, we record it only for file extent rebuild.
7538 * For ret == 0, the file extent exists but only bytenr
7539 * mismatch, let the original bytenr fix routine to handle,
7545 orphan = malloc(sizeof(*orphan));
7550 INIT_LIST_HEAD(&orphan->list);
7551 orphan->root = dback->root;
7552 orphan->objectid = dback->owner;
7553 orphan->offset = dback->offset;
7554 orphan->disk_bytenr = rec->cache.start;
7555 orphan->disk_len = rec->cache.size;
7556 list_add(&dest_root->orphan_data_extents, &orphan->list);
7557 recorded_data_ref = 1;
7560 btrfs_free_path(path);
7562 return !recorded_data_ref;
7568 * when an incorrect extent item is found, this will delete
7569 * all of the existing entries for it and recreate them
7570 * based on what the tree scan found.
7572 static int fixup_extent_refs(struct btrfs_fs_info *info,
7573 struct cache_tree *extent_cache,
7574 struct extent_record *rec)
7576 struct btrfs_trans_handle *trans = NULL;
7578 struct btrfs_path *path;
7579 struct list_head *cur = rec->backrefs.next;
7580 struct cache_extent *cache;
7581 struct extent_backref *back;
7585 if (rec->flag_block_full_backref)
7586 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7588 path = btrfs_alloc_path();
7592 if (rec->refs != rec->extent_item_refs && !rec->metadata) {
7594 * Sometimes the backrefs themselves are so broken they don't
7595 * get attached to any meaningful rec, so first go back and
7596 * check any of our backrefs that we couldn't find and throw
7597 * them into the list if we find the backref so that
7598 * verify_backrefs can figure out what to do.
7600 ret = find_possible_backrefs(info, path, extent_cache, rec);
7605 /* step one, make sure all of the backrefs agree */
7606 ret = verify_backrefs(info, path, rec);
7610 trans = btrfs_start_transaction(info->extent_root, 1);
7611 if (IS_ERR(trans)) {
7612 ret = PTR_ERR(trans);
7616 /* step two, delete all the existing records */
7617 ret = delete_extent_records(trans, info->extent_root, path,
7618 rec->start, rec->max_size);
7623 /* was this block corrupt? If so, don't add references to it */
7624 cache = lookup_cache_extent(info->corrupt_blocks,
7625 rec->start, rec->max_size);
7631 /* step three, recreate all the refs we did find */
7632 while(cur != &rec->backrefs) {
7633 back = to_extent_backref(cur);
7637 * if we didn't find any references, don't create a
7640 if (!back->found_ref)
7643 rec->bad_full_backref = 0;
7644 ret = record_extent(trans, info, path, rec, back, allocated, flags);
7652 int err = btrfs_commit_transaction(trans, info->extent_root);
7657 btrfs_free_path(path);
7661 static int fixup_extent_flags(struct btrfs_fs_info *fs_info,
7662 struct extent_record *rec)
7664 struct btrfs_trans_handle *trans;
7665 struct btrfs_root *root = fs_info->extent_root;
7666 struct btrfs_path *path;
7667 struct btrfs_extent_item *ei;
7668 struct btrfs_key key;
7672 key.objectid = rec->start;
7673 if (rec->metadata) {
7674 key.type = BTRFS_METADATA_ITEM_KEY;
7675 key.offset = rec->info_level;
7677 key.type = BTRFS_EXTENT_ITEM_KEY;
7678 key.offset = rec->max_size;
7681 path = btrfs_alloc_path();
7685 trans = btrfs_start_transaction(root, 0);
7686 if (IS_ERR(trans)) {
7687 btrfs_free_path(path);
7688 return PTR_ERR(trans);
7691 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
7693 btrfs_free_path(path);
7694 btrfs_commit_transaction(trans, root);
7697 fprintf(stderr, "Didn't find extent for %llu\n",
7698 (unsigned long long)rec->start);
7699 btrfs_free_path(path);
7700 btrfs_commit_transaction(trans, root);
7704 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
7705 struct btrfs_extent_item);
7706 flags = btrfs_extent_flags(path->nodes[0], ei);
7707 if (rec->flag_block_full_backref) {
7708 fprintf(stderr, "setting full backref on %llu\n",
7709 (unsigned long long)key.objectid);
7710 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7712 fprintf(stderr, "clearing full backref on %llu\n",
7713 (unsigned long long)key.objectid);
7714 flags &= ~BTRFS_BLOCK_FLAG_FULL_BACKREF;
7716 btrfs_set_extent_flags(path->nodes[0], ei, flags);
7717 btrfs_mark_buffer_dirty(path->nodes[0]);
7718 btrfs_free_path(path);
7719 return btrfs_commit_transaction(trans, root);
7722 /* right now we only prune from the extent allocation tree */
7723 static int prune_one_block(struct btrfs_trans_handle *trans,
7724 struct btrfs_fs_info *info,
7725 struct btrfs_corrupt_block *corrupt)
7728 struct btrfs_path path;
7729 struct extent_buffer *eb;
7733 int level = corrupt->level + 1;
7735 btrfs_init_path(&path);
7737 /* we want to stop at the parent to our busted block */
7738 path.lowest_level = level;
7740 ret = btrfs_search_slot(trans, info->extent_root,
7741 &corrupt->key, &path, -1, 1);
7746 eb = path.nodes[level];
7753 * hopefully the search gave us the block we want to prune,
7754 * lets try that first
7756 slot = path.slots[level];
7757 found = btrfs_node_blockptr(eb, slot);
7758 if (found == corrupt->cache.start)
7761 nritems = btrfs_header_nritems(eb);
7763 /* the search failed, lets scan this node and hope we find it */
7764 for (slot = 0; slot < nritems; slot++) {
7765 found = btrfs_node_blockptr(eb, slot);
7766 if (found == corrupt->cache.start)
7770 * we couldn't find the bad block. TODO, search all the nodes for pointers
7773 if (eb == info->extent_root->node) {
7778 btrfs_release_path(&path);
7783 printk("deleting pointer to block %Lu\n", corrupt->cache.start);
7784 ret = btrfs_del_ptr(trans, info->extent_root, &path, level, slot);
7787 btrfs_release_path(&path);
7791 static int prune_corrupt_blocks(struct btrfs_fs_info *info)
7793 struct btrfs_trans_handle *trans = NULL;
7794 struct cache_extent *cache;
7795 struct btrfs_corrupt_block *corrupt;
7798 cache = search_cache_extent(info->corrupt_blocks, 0);
7802 trans = btrfs_start_transaction(info->extent_root, 1);
7804 return PTR_ERR(trans);
7806 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
7807 prune_one_block(trans, info, corrupt);
7808 remove_cache_extent(info->corrupt_blocks, cache);
7811 return btrfs_commit_transaction(trans, info->extent_root);
7815 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info)
7817 struct btrfs_block_group_cache *cache;
7822 ret = find_first_extent_bit(&fs_info->free_space_cache, 0,
7823 &start, &end, EXTENT_DIRTY);
7826 clear_extent_dirty(&fs_info->free_space_cache, start, end,
7832 cache = btrfs_lookup_first_block_group(fs_info, start);
7837 start = cache->key.objectid + cache->key.offset;
7841 static int check_extent_refs(struct btrfs_root *root,
7842 struct cache_tree *extent_cache)
7844 struct extent_record *rec;
7845 struct cache_extent *cache;
7854 * if we're doing a repair, we have to make sure
7855 * we don't allocate from the problem extents.
7856 * In the worst case, this will be all the
7859 cache = search_cache_extent(extent_cache, 0);
7861 rec = container_of(cache, struct extent_record, cache);
7862 set_extent_dirty(root->fs_info->excluded_extents,
7864 rec->start + rec->max_size - 1,
7866 cache = next_cache_extent(cache);
7869 /* pin down all the corrupted blocks too */
7870 cache = search_cache_extent(root->fs_info->corrupt_blocks, 0);
7872 set_extent_dirty(root->fs_info->excluded_extents,
7874 cache->start + cache->size - 1,
7876 cache = next_cache_extent(cache);
7878 prune_corrupt_blocks(root->fs_info);
7879 reset_cached_block_groups(root->fs_info);
7882 reset_cached_block_groups(root->fs_info);
7885 * We need to delete any duplicate entries we find first otherwise we
7886 * could mess up the extent tree when we have backrefs that actually
7887 * belong to a different extent item and not the weird duplicate one.
7889 while (repair && !list_empty(&duplicate_extents)) {
7890 rec = to_extent_record(duplicate_extents.next);
7891 list_del_init(&rec->list);
7893 /* Sometimes we can find a backref before we find an actual
7894 * extent, so we need to process it a little bit to see if there
7895 * truly are multiple EXTENT_ITEM_KEY's for the same range, or
7896 * if this is a backref screwup. If we need to delete stuff
7897 * process_duplicates() will return 0, otherwise it will return
7900 if (process_duplicates(root, extent_cache, rec))
7902 ret = delete_duplicate_records(root, rec);
7906 * delete_duplicate_records will return the number of entries
7907 * deleted, so if it's greater than 0 then we know we actually
7908 * did something and we need to remove.
7922 cache = search_cache_extent(extent_cache, 0);
7925 rec = container_of(cache, struct extent_record, cache);
7926 if (rec->num_duplicates) {
7927 fprintf(stderr, "extent item %llu has multiple extent "
7928 "items\n", (unsigned long long)rec->start);
7933 if (rec->refs != rec->extent_item_refs) {
7934 fprintf(stderr, "ref mismatch on [%llu %llu] ",
7935 (unsigned long long)rec->start,
7936 (unsigned long long)rec->nr);
7937 fprintf(stderr, "extent item %llu, found %llu\n",
7938 (unsigned long long)rec->extent_item_refs,
7939 (unsigned long long)rec->refs);
7940 ret = record_orphan_data_extents(root->fs_info, rec);
7947 * we can't use the extent to repair file
7948 * extent, let the fallback method handle it.
7950 if (!fixed && repair) {
7951 ret = fixup_extent_refs(
7962 if (all_backpointers_checked(rec, 1)) {
7963 fprintf(stderr, "backpointer mismatch on [%llu %llu]\n",
7964 (unsigned long long)rec->start,
7965 (unsigned long long)rec->nr);
7967 if (!fixed && !recorded && repair) {
7968 ret = fixup_extent_refs(root->fs_info,
7977 if (!rec->owner_ref_checked) {
7978 fprintf(stderr, "owner ref check failed [%llu %llu]\n",
7979 (unsigned long long)rec->start,
7980 (unsigned long long)rec->nr);
7981 if (!fixed && !recorded && repair) {
7982 ret = fixup_extent_refs(root->fs_info,
7991 if (rec->bad_full_backref) {
7992 fprintf(stderr, "bad full backref, on [%llu]\n",
7993 (unsigned long long)rec->start);
7995 ret = fixup_extent_flags(root->fs_info, rec);
8004 * Although it's not a extent ref's problem, we reuse this
8005 * routine for error reporting.
8006 * No repair function yet.
8008 if (rec->crossing_stripes) {
8010 "bad metadata [%llu, %llu) crossing stripe boundary\n",
8011 rec->start, rec->start + rec->max_size);
8016 if (rec->wrong_chunk_type) {
8018 "bad extent [%llu, %llu), type mismatch with chunk\n",
8019 rec->start, rec->start + rec->max_size);
8024 remove_cache_extent(extent_cache, cache);
8025 free_all_extent_backrefs(rec);
8026 if (!init_extent_tree && repair && (!cur_err || fixed))
8027 clear_extent_dirty(root->fs_info->excluded_extents,
8029 rec->start + rec->max_size - 1,
8035 if (ret && ret != -EAGAIN) {
8036 fprintf(stderr, "failed to repair damaged filesystem, aborting\n");
8039 struct btrfs_trans_handle *trans;
8041 root = root->fs_info->extent_root;
8042 trans = btrfs_start_transaction(root, 1);
8043 if (IS_ERR(trans)) {
8044 ret = PTR_ERR(trans);
8048 btrfs_fix_block_accounting(trans, root);
8049 ret = btrfs_commit_transaction(trans, root);
8054 fprintf(stderr, "repaired damaged extent references\n");
8060 u64 calc_stripe_length(u64 type, u64 length, int num_stripes)
8064 if (type & BTRFS_BLOCK_GROUP_RAID0) {
8065 stripe_size = length;
8066 stripe_size /= num_stripes;
8067 } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
8068 stripe_size = length * 2;
8069 stripe_size /= num_stripes;
8070 } else if (type & BTRFS_BLOCK_GROUP_RAID5) {
8071 stripe_size = length;
8072 stripe_size /= (num_stripes - 1);
8073 } else if (type & BTRFS_BLOCK_GROUP_RAID6) {
8074 stripe_size = length;
8075 stripe_size /= (num_stripes - 2);
8077 stripe_size = length;
8083 * Check the chunk with its block group/dev list ref:
8084 * Return 0 if all refs seems valid.
8085 * Return 1 if part of refs seems valid, need later check for rebuild ref
8086 * like missing block group and needs to search extent tree to rebuild them.
8087 * Return -1 if essential refs are missing and unable to rebuild.
8089 static int check_chunk_refs(struct chunk_record *chunk_rec,
8090 struct block_group_tree *block_group_cache,
8091 struct device_extent_tree *dev_extent_cache,
8094 struct cache_extent *block_group_item;
8095 struct block_group_record *block_group_rec;
8096 struct cache_extent *dev_extent_item;
8097 struct device_extent_record *dev_extent_rec;
8101 int metadump_v2 = 0;
8105 block_group_item = lookup_cache_extent(&block_group_cache->tree,
8108 if (block_group_item) {
8109 block_group_rec = container_of(block_group_item,
8110 struct block_group_record,
8112 if (chunk_rec->length != block_group_rec->offset ||
8113 chunk_rec->offset != block_group_rec->objectid ||
8115 chunk_rec->type_flags != block_group_rec->flags)) {
8118 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) mismatch with block group[%llu, %u, %llu]: offset(%llu), objectid(%llu), flags(%llu)\n",
8119 chunk_rec->objectid,
8124 chunk_rec->type_flags,
8125 block_group_rec->objectid,
8126 block_group_rec->type,
8127 block_group_rec->offset,
8128 block_group_rec->offset,
8129 block_group_rec->objectid,
8130 block_group_rec->flags);
8133 list_del_init(&block_group_rec->list);
8134 chunk_rec->bg_rec = block_group_rec;
8139 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) is not found in block group\n",
8140 chunk_rec->objectid,
8145 chunk_rec->type_flags);
8152 length = calc_stripe_length(chunk_rec->type_flags, chunk_rec->length,
8153 chunk_rec->num_stripes);
8154 for (i = 0; i < chunk_rec->num_stripes; ++i) {
8155 devid = chunk_rec->stripes[i].devid;
8156 offset = chunk_rec->stripes[i].offset;
8157 dev_extent_item = lookup_cache_extent2(&dev_extent_cache->tree,
8158 devid, offset, length);
8159 if (dev_extent_item) {
8160 dev_extent_rec = container_of(dev_extent_item,
8161 struct device_extent_record,
8163 if (dev_extent_rec->objectid != devid ||
8164 dev_extent_rec->offset != offset ||
8165 dev_extent_rec->chunk_offset != chunk_rec->offset ||
8166 dev_extent_rec->length != length) {
8169 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] dismatch dev extent[%llu, %llu, %llu]\n",
8170 chunk_rec->objectid,
8173 chunk_rec->stripes[i].devid,
8174 chunk_rec->stripes[i].offset,
8175 dev_extent_rec->objectid,
8176 dev_extent_rec->offset,
8177 dev_extent_rec->length);
8180 list_move(&dev_extent_rec->chunk_list,
8181 &chunk_rec->dextents);
8186 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] is not found in dev extent\n",
8187 chunk_rec->objectid,
8190 chunk_rec->stripes[i].devid,
8191 chunk_rec->stripes[i].offset);
8198 /* check btrfs_chunk -> btrfs_dev_extent / btrfs_block_group_item */
8199 int check_chunks(struct cache_tree *chunk_cache,
8200 struct block_group_tree *block_group_cache,
8201 struct device_extent_tree *dev_extent_cache,
8202 struct list_head *good, struct list_head *bad,
8203 struct list_head *rebuild, int silent)
8205 struct cache_extent *chunk_item;
8206 struct chunk_record *chunk_rec;
8207 struct block_group_record *bg_rec;
8208 struct device_extent_record *dext_rec;
8212 chunk_item = first_cache_extent(chunk_cache);
8213 while (chunk_item) {
8214 chunk_rec = container_of(chunk_item, struct chunk_record,
8216 err = check_chunk_refs(chunk_rec, block_group_cache,
8217 dev_extent_cache, silent);
8220 if (err == 0 && good)
8221 list_add_tail(&chunk_rec->list, good);
8222 if (err > 0 && rebuild)
8223 list_add_tail(&chunk_rec->list, rebuild);
8225 list_add_tail(&chunk_rec->list, bad);
8226 chunk_item = next_cache_extent(chunk_item);
8229 list_for_each_entry(bg_rec, &block_group_cache->block_groups, list) {
8232 "Block group[%llu, %llu] (flags = %llu) didn't find the relative chunk.\n",
8240 list_for_each_entry(dext_rec, &dev_extent_cache->no_chunk_orphans,
8244 "Device extent[%llu, %llu, %llu] didn't find the relative chunk.\n",
8255 static int check_device_used(struct device_record *dev_rec,
8256 struct device_extent_tree *dext_cache)
8258 struct cache_extent *cache;
8259 struct device_extent_record *dev_extent_rec;
8262 cache = search_cache_extent2(&dext_cache->tree, dev_rec->devid, 0);
8264 dev_extent_rec = container_of(cache,
8265 struct device_extent_record,
8267 if (dev_extent_rec->objectid != dev_rec->devid)
8270 list_del_init(&dev_extent_rec->device_list);
8271 total_byte += dev_extent_rec->length;
8272 cache = next_cache_extent(cache);
8275 if (total_byte != dev_rec->byte_used) {
8277 "Dev extent's total-byte(%llu) is not equal to byte-used(%llu) in dev[%llu, %u, %llu]\n",
8278 total_byte, dev_rec->byte_used, dev_rec->objectid,
8279 dev_rec->type, dev_rec->offset);
8286 /* check btrfs_dev_item -> btrfs_dev_extent */
8287 static int check_devices(struct rb_root *dev_cache,
8288 struct device_extent_tree *dev_extent_cache)
8290 struct rb_node *dev_node;
8291 struct device_record *dev_rec;
8292 struct device_extent_record *dext_rec;
8296 dev_node = rb_first(dev_cache);
8298 dev_rec = container_of(dev_node, struct device_record, node);
8299 err = check_device_used(dev_rec, dev_extent_cache);
8303 dev_node = rb_next(dev_node);
8305 list_for_each_entry(dext_rec, &dev_extent_cache->no_device_orphans,
8308 "Device extent[%llu, %llu, %llu] didn't find its device.\n",
8309 dext_rec->objectid, dext_rec->offset, dext_rec->length);
8316 static int add_root_item_to_list(struct list_head *head,
8317 u64 objectid, u64 bytenr, u64 last_snapshot,
8318 u8 level, u8 drop_level,
8319 int level_size, struct btrfs_key *drop_key)
8322 struct root_item_record *ri_rec;
8323 ri_rec = malloc(sizeof(*ri_rec));
8326 ri_rec->bytenr = bytenr;
8327 ri_rec->objectid = objectid;
8328 ri_rec->level = level;
8329 ri_rec->level_size = level_size;
8330 ri_rec->drop_level = drop_level;
8331 ri_rec->last_snapshot = last_snapshot;
8333 memcpy(&ri_rec->drop_key, drop_key, sizeof(*drop_key));
8334 list_add_tail(&ri_rec->list, head);
8339 static void free_root_item_list(struct list_head *list)
8341 struct root_item_record *ri_rec;
8343 while (!list_empty(list)) {
8344 ri_rec = list_first_entry(list, struct root_item_record,
8346 list_del_init(&ri_rec->list);
8351 static int deal_root_from_list(struct list_head *list,
8352 struct btrfs_root *root,
8353 struct block_info *bits,
8355 struct cache_tree *pending,
8356 struct cache_tree *seen,
8357 struct cache_tree *reada,
8358 struct cache_tree *nodes,
8359 struct cache_tree *extent_cache,
8360 struct cache_tree *chunk_cache,
8361 struct rb_root *dev_cache,
8362 struct block_group_tree *block_group_cache,
8363 struct device_extent_tree *dev_extent_cache)
8368 while (!list_empty(list)) {
8369 struct root_item_record *rec;
8370 struct extent_buffer *buf;
8371 rec = list_entry(list->next,
8372 struct root_item_record, list);
8374 buf = read_tree_block(root->fs_info->tree_root,
8375 rec->bytenr, rec->level_size, 0);
8376 if (!extent_buffer_uptodate(buf)) {
8377 free_extent_buffer(buf);
8381 ret = add_root_to_pending(buf, extent_cache, pending,
8382 seen, nodes, rec->objectid);
8386 * To rebuild extent tree, we need deal with snapshot
8387 * one by one, otherwise we deal with node firstly which
8388 * can maximize readahead.
8391 ret = run_next_block(root, bits, bits_nr, &last,
8392 pending, seen, reada, nodes,
8393 extent_cache, chunk_cache,
8394 dev_cache, block_group_cache,
8395 dev_extent_cache, rec);
8399 free_extent_buffer(buf);
8400 list_del(&rec->list);
8406 ret = run_next_block(root, bits, bits_nr, &last, pending, seen,
8407 reada, nodes, extent_cache, chunk_cache,
8408 dev_cache, block_group_cache,
8409 dev_extent_cache, NULL);
8419 static int check_chunks_and_extents(struct btrfs_root *root)
8421 struct rb_root dev_cache;
8422 struct cache_tree chunk_cache;
8423 struct block_group_tree block_group_cache;
8424 struct device_extent_tree dev_extent_cache;
8425 struct cache_tree extent_cache;
8426 struct cache_tree seen;
8427 struct cache_tree pending;
8428 struct cache_tree reada;
8429 struct cache_tree nodes;
8430 struct extent_io_tree excluded_extents;
8431 struct cache_tree corrupt_blocks;
8432 struct btrfs_path path;
8433 struct btrfs_key key;
8434 struct btrfs_key found_key;
8436 struct block_info *bits;
8438 struct extent_buffer *leaf;
8440 struct btrfs_root_item ri;
8441 struct list_head dropping_trees;
8442 struct list_head normal_trees;
8443 struct btrfs_root *root1;
8448 dev_cache = RB_ROOT;
8449 cache_tree_init(&chunk_cache);
8450 block_group_tree_init(&block_group_cache);
8451 device_extent_tree_init(&dev_extent_cache);
8453 cache_tree_init(&extent_cache);
8454 cache_tree_init(&seen);
8455 cache_tree_init(&pending);
8456 cache_tree_init(&nodes);
8457 cache_tree_init(&reada);
8458 cache_tree_init(&corrupt_blocks);
8459 extent_io_tree_init(&excluded_extents);
8460 INIT_LIST_HEAD(&dropping_trees);
8461 INIT_LIST_HEAD(&normal_trees);
8464 root->fs_info->excluded_extents = &excluded_extents;
8465 root->fs_info->fsck_extent_cache = &extent_cache;
8466 root->fs_info->free_extent_hook = free_extent_hook;
8467 root->fs_info->corrupt_blocks = &corrupt_blocks;
8471 bits = malloc(bits_nr * sizeof(struct block_info));
8477 if (ctx.progress_enabled) {
8478 ctx.tp = TASK_EXTENTS;
8479 task_start(ctx.info);
8483 root1 = root->fs_info->tree_root;
8484 level = btrfs_header_level(root1->node);
8485 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
8486 root1->node->start, 0, level, 0,
8487 root1->nodesize, NULL);
8490 root1 = root->fs_info->chunk_root;
8491 level = btrfs_header_level(root1->node);
8492 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
8493 root1->node->start, 0, level, 0,
8494 root1->nodesize, NULL);
8497 btrfs_init_path(&path);
8500 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
8501 ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
8506 leaf = path.nodes[0];
8507 slot = path.slots[0];
8508 if (slot >= btrfs_header_nritems(path.nodes[0])) {
8509 ret = btrfs_next_leaf(root, &path);
8512 leaf = path.nodes[0];
8513 slot = path.slots[0];
8515 btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
8516 if (btrfs_key_type(&found_key) == BTRFS_ROOT_ITEM_KEY) {
8517 unsigned long offset;
8520 offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
8521 read_extent_buffer(leaf, &ri, offset, sizeof(ri));
8522 last_snapshot = btrfs_root_last_snapshot(&ri);
8523 if (btrfs_disk_key_objectid(&ri.drop_progress) == 0) {
8524 level = btrfs_root_level(&ri);
8525 level_size = root->nodesize;
8526 ret = add_root_item_to_list(&normal_trees,
8528 btrfs_root_bytenr(&ri),
8529 last_snapshot, level,
8530 0, level_size, NULL);
8534 level = btrfs_root_level(&ri);
8535 level_size = root->nodesize;
8536 objectid = found_key.objectid;
8537 btrfs_disk_key_to_cpu(&found_key,
8539 ret = add_root_item_to_list(&dropping_trees,
8541 btrfs_root_bytenr(&ri),
8542 last_snapshot, level,
8544 level_size, &found_key);
8551 btrfs_release_path(&path);
8554 * check_block can return -EAGAIN if it fixes something, please keep
8555 * this in mind when dealing with return values from these functions, if
8556 * we get -EAGAIN we want to fall through and restart the loop.
8558 ret = deal_root_from_list(&normal_trees, root, bits, bits_nr, &pending,
8559 &seen, &reada, &nodes, &extent_cache,
8560 &chunk_cache, &dev_cache, &block_group_cache,
8567 ret = deal_root_from_list(&dropping_trees, root, bits, bits_nr,
8568 &pending, &seen, &reada, &nodes,
8569 &extent_cache, &chunk_cache, &dev_cache,
8570 &block_group_cache, &dev_extent_cache);
8577 ret = check_chunks(&chunk_cache, &block_group_cache,
8578 &dev_extent_cache, NULL, NULL, NULL, 0);
8585 ret = check_extent_refs(root, &extent_cache);
8592 ret = check_devices(&dev_cache, &dev_extent_cache);
8597 task_stop(ctx.info);
8599 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
8600 extent_io_tree_cleanup(&excluded_extents);
8601 root->fs_info->fsck_extent_cache = NULL;
8602 root->fs_info->free_extent_hook = NULL;
8603 root->fs_info->corrupt_blocks = NULL;
8604 root->fs_info->excluded_extents = NULL;
8607 free_chunk_cache_tree(&chunk_cache);
8608 free_device_cache_tree(&dev_cache);
8609 free_block_group_tree(&block_group_cache);
8610 free_device_extent_tree(&dev_extent_cache);
8611 free_extent_cache_tree(&seen);
8612 free_extent_cache_tree(&pending);
8613 free_extent_cache_tree(&reada);
8614 free_extent_cache_tree(&nodes);
8617 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
8618 free_extent_cache_tree(&seen);
8619 free_extent_cache_tree(&pending);
8620 free_extent_cache_tree(&reada);
8621 free_extent_cache_tree(&nodes);
8622 free_chunk_cache_tree(&chunk_cache);
8623 free_block_group_tree(&block_group_cache);
8624 free_device_cache_tree(&dev_cache);
8625 free_device_extent_tree(&dev_extent_cache);
8626 free_extent_record_cache(root->fs_info, &extent_cache);
8627 free_root_item_list(&normal_trees);
8628 free_root_item_list(&dropping_trees);
8629 extent_io_tree_cleanup(&excluded_extents);
8634 * Check backrefs of a tree block given by @bytenr or @eb.
8636 * @root: the root containing the @bytenr or @eb
8637 * @eb: tree block extent buffer, can be NULL
8638 * @bytenr: bytenr of the tree block to search
8639 * @level: tree level of the tree block
8640 * @owner: owner of the tree block
8642 * Return >0 for any error found and output error message
8643 * Return 0 for no error found
8645 static int check_tree_block_ref(struct btrfs_root *root,
8646 struct extent_buffer *eb, u64 bytenr,
8647 int level, u64 owner)
8649 struct btrfs_key key;
8650 struct btrfs_root *extent_root = root->fs_info->extent_root;
8651 struct btrfs_path path;
8652 struct btrfs_extent_item *ei;
8653 struct btrfs_extent_inline_ref *iref;
8654 struct extent_buffer *leaf;
8660 u32 nodesize = root->nodesize;
8667 btrfs_init_path(&path);
8668 key.objectid = bytenr;
8669 if (btrfs_fs_incompat(root->fs_info,
8670 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA))
8671 key.type = BTRFS_METADATA_ITEM_KEY;
8673 key.type = BTRFS_EXTENT_ITEM_KEY;
8674 key.offset = (u64)-1;
8676 /* Search for the backref in extent tree */
8677 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
8679 err |= BACKREF_MISSING;
8682 ret = btrfs_previous_extent_item(extent_root, &path, bytenr);
8684 err |= BACKREF_MISSING;
8688 leaf = path.nodes[0];
8689 slot = path.slots[0];
8690 btrfs_item_key_to_cpu(leaf, &key, slot);
8692 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
8694 if (key.type == BTRFS_METADATA_ITEM_KEY) {
8695 skinny_level = (int)key.offset;
8696 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
8698 struct btrfs_tree_block_info *info;
8700 info = (struct btrfs_tree_block_info *)(ei + 1);
8701 skinny_level = btrfs_tree_block_level(leaf, info);
8702 iref = (struct btrfs_extent_inline_ref *)(info + 1);
8709 if (!(btrfs_extent_flags(leaf, ei) &
8710 BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
8712 "extent[%llu %u] backref type mismatch, missing bit: %llx",
8713 key.objectid, nodesize,
8714 BTRFS_EXTENT_FLAG_TREE_BLOCK);
8715 err = BACKREF_MISMATCH;
8717 header_gen = btrfs_header_generation(eb);
8718 extent_gen = btrfs_extent_generation(leaf, ei);
8719 if (header_gen != extent_gen) {
8721 "extent[%llu %u] backref generation mismatch, wanted: %llu, have: %llu",
8722 key.objectid, nodesize, header_gen,
8724 err = BACKREF_MISMATCH;
8726 if (level != skinny_level) {
8728 "extent[%llu %u] level mismatch, wanted: %u, have: %u",
8729 key.objectid, nodesize, level, skinny_level);
8730 err = BACKREF_MISMATCH;
8732 if (!is_fstree(owner) && btrfs_extent_refs(leaf, ei) != 1) {
8734 "extent[%llu %u] is referred by other roots than %llu",
8735 key.objectid, nodesize, root->objectid);
8736 err = BACKREF_MISMATCH;
8741 * Iterate the extent/metadata item to find the exact backref
8743 item_size = btrfs_item_size_nr(leaf, slot);
8744 ptr = (unsigned long)iref;
8745 end = (unsigned long)ei + item_size;
8747 iref = (struct btrfs_extent_inline_ref *)ptr;
8748 type = btrfs_extent_inline_ref_type(leaf, iref);
8749 offset = btrfs_extent_inline_ref_offset(leaf, iref);
8751 if (type == BTRFS_TREE_BLOCK_REF_KEY &&
8752 (offset == root->objectid || offset == owner)) {
8754 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
8755 /* Check if the backref points to valid referencer */
8756 found_ref = !check_tree_block_ref(root, NULL, offset,
8762 ptr += btrfs_extent_inline_ref_size(type);
8766 * Inlined extent item doesn't have what we need, check
8767 * TREE_BLOCK_REF_KEY
8770 btrfs_release_path(&path);
8771 key.objectid = bytenr;
8772 key.type = BTRFS_TREE_BLOCK_REF_KEY;
8773 key.offset = root->objectid;
8775 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
8780 err |= BACKREF_MISSING;
8782 btrfs_release_path(&path);
8783 if (eb && (err & BACKREF_MISSING))
8784 error("extent[%llu %u] backref lost (owner: %llu, level: %u)",
8785 bytenr, nodesize, owner, level);
8790 * Check EXTENT_DATA item, mainly for its dbackref in extent tree
8792 * Return >0 any error found and output error message
8793 * Return 0 for no error found
8795 static int check_extent_data_item(struct btrfs_root *root,
8796 struct extent_buffer *eb, int slot)
8798 struct btrfs_file_extent_item *fi;
8799 struct btrfs_path path;
8800 struct btrfs_root *extent_root = root->fs_info->extent_root;
8801 struct btrfs_key fi_key;
8802 struct btrfs_key dbref_key;
8803 struct extent_buffer *leaf;
8804 struct btrfs_extent_item *ei;
8805 struct btrfs_extent_inline_ref *iref;
8806 struct btrfs_extent_data_ref *dref;
8808 u64 file_extent_gen;
8811 u64 extent_num_bytes;
8819 int found_dbackref = 0;
8823 btrfs_item_key_to_cpu(eb, &fi_key, slot);
8824 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
8825 file_extent_gen = btrfs_file_extent_generation(eb, fi);
8827 /* Nothing to check for hole and inline data extents */
8828 if (btrfs_file_extent_type(eb, fi) == BTRFS_FILE_EXTENT_INLINE ||
8829 btrfs_file_extent_disk_bytenr(eb, fi) == 0)
8832 disk_bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8833 disk_num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8834 extent_num_bytes = btrfs_file_extent_num_bytes(eb, fi);
8836 /* Check unaligned disk_num_bytes and num_bytes */
8837 if (!IS_ALIGNED(disk_num_bytes, root->sectorsize)) {
8839 "file extent [%llu, %llu] has unaligned disk num bytes: %llu, should be aligned to %u",
8840 fi_key.objectid, fi_key.offset, disk_num_bytes,
8842 err |= BYTES_UNALIGNED;
8844 data_bytes_allocated += disk_num_bytes;
8846 if (!IS_ALIGNED(extent_num_bytes, root->sectorsize)) {
8848 "file extent [%llu, %llu] has unaligned num bytes: %llu, should be aligned to %u",
8849 fi_key.objectid, fi_key.offset, extent_num_bytes,
8851 err |= BYTES_UNALIGNED;
8853 data_bytes_referenced += extent_num_bytes;
8855 owner = btrfs_header_owner(eb);
8857 /* Check the extent item of the file extent in extent tree */
8858 btrfs_init_path(&path);
8859 dbref_key.objectid = btrfs_file_extent_disk_bytenr(eb, fi);
8860 dbref_key.type = BTRFS_EXTENT_ITEM_KEY;
8861 dbref_key.offset = btrfs_file_extent_disk_num_bytes(eb, fi);
8863 ret = btrfs_search_slot(NULL, extent_root, &dbref_key, &path, 0, 0);
8865 err |= BACKREF_MISSING;
8869 leaf = path.nodes[0];
8870 slot = path.slots[0];
8871 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
8873 extent_flags = btrfs_extent_flags(leaf, ei);
8874 extent_gen = btrfs_extent_generation(leaf, ei);
8876 if (!(extent_flags & BTRFS_EXTENT_FLAG_DATA)) {
8878 "extent[%llu %llu] backref type mismatch, wanted bit: %llx",
8879 disk_bytenr, disk_num_bytes,
8880 BTRFS_EXTENT_FLAG_DATA);
8881 err |= BACKREF_MISMATCH;
8884 if (file_extent_gen < extent_gen) {
8886 "extent[%llu %llu] backref generation mismatch, wanted: <=%llu, have: %llu",
8887 disk_bytenr, disk_num_bytes, file_extent_gen,
8889 err |= BACKREF_MISMATCH;
8892 /* Check data backref inside that extent item */
8893 item_size = btrfs_item_size_nr(leaf, path.slots[0]);
8894 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
8895 ptr = (unsigned long)iref;
8896 end = (unsigned long)ei + item_size;
8898 iref = (struct btrfs_extent_inline_ref *)ptr;
8899 type = btrfs_extent_inline_ref_type(leaf, iref);
8900 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
8902 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
8903 ref_root = btrfs_extent_data_ref_root(leaf, dref);
8904 if (ref_root == owner || ref_root == root->objectid)
8906 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
8907 found_dbackref = !check_tree_block_ref(root, NULL,
8908 btrfs_extent_inline_ref_offset(leaf, iref),
8914 ptr += btrfs_extent_inline_ref_size(type);
8917 /* Didn't found inlined data backref, try EXTENT_DATA_REF_KEY */
8918 if (!found_dbackref) {
8919 btrfs_release_path(&path);
8921 btrfs_init_path(&path);
8922 dbref_key.objectid = btrfs_file_extent_disk_bytenr(eb, fi);
8923 dbref_key.type = BTRFS_EXTENT_DATA_REF_KEY;
8924 dbref_key.offset = hash_extent_data_ref(root->objectid,
8925 fi_key.objectid, fi_key.offset);
8927 ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
8928 &dbref_key, &path, 0, 0);
8933 if (!found_dbackref)
8934 err |= BACKREF_MISSING;
8936 btrfs_release_path(&path);
8937 if (err & BACKREF_MISSING) {
8938 error("data extent[%llu %llu] backref lost",
8939 disk_bytenr, disk_num_bytes);
8945 * Get real tree block level for the case like shared block
8946 * Return >= 0 as tree level
8947 * Return <0 for error
8949 static int query_tree_block_level(struct btrfs_fs_info *fs_info, u64 bytenr)
8951 struct extent_buffer *eb;
8952 struct btrfs_path path;
8953 struct btrfs_key key;
8954 struct btrfs_extent_item *ei;
8957 u32 nodesize = btrfs_super_nodesize(fs_info->super_copy);
8962 /* Search extent tree for extent generation and level */
8963 key.objectid = bytenr;
8964 key.type = BTRFS_METADATA_ITEM_KEY;
8965 key.offset = (u64)-1;
8967 btrfs_init_path(&path);
8968 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, &path, 0, 0);
8971 ret = btrfs_previous_extent_item(fs_info->extent_root, &path, bytenr);
8979 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
8980 ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
8981 struct btrfs_extent_item);
8982 flags = btrfs_extent_flags(path.nodes[0], ei);
8983 if (!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
8988 /* Get transid for later read_tree_block() check */
8989 transid = btrfs_extent_generation(path.nodes[0], ei);
8991 /* Get backref level as one source */
8992 if (key.type == BTRFS_METADATA_ITEM_KEY) {
8993 backref_level = key.offset;
8995 struct btrfs_tree_block_info *info;
8997 info = (struct btrfs_tree_block_info *)(ei + 1);
8998 backref_level = btrfs_tree_block_level(path.nodes[0], info);
9000 btrfs_release_path(&path);
9002 /* Get level from tree block as an alternative source */
9003 eb = read_tree_block_fs_info(fs_info, bytenr, nodesize, transid);
9004 if (!extent_buffer_uptodate(eb)) {
9005 free_extent_buffer(eb);
9008 header_level = btrfs_header_level(eb);
9009 free_extent_buffer(eb);
9011 if (header_level != backref_level)
9013 return header_level;
9016 btrfs_release_path(&path);
9021 * Check if a tree block backref is valid (points to a valid tree block)
9022 * if level == -1, level will be resolved
9023 * Return >0 for any error found and print error message
9025 static int check_tree_block_backref(struct btrfs_fs_info *fs_info, u64 root_id,
9026 u64 bytenr, int level)
9028 struct btrfs_root *root;
9029 struct btrfs_key key;
9030 struct btrfs_path path;
9031 struct extent_buffer *eb;
9032 struct extent_buffer *node;
9033 u32 nodesize = btrfs_super_nodesize(fs_info->super_copy);
9037 /* Query level for level == -1 special case */
9039 level = query_tree_block_level(fs_info, bytenr);
9041 err |= REFERENCER_MISSING;
9045 key.objectid = root_id;
9046 key.type = BTRFS_ROOT_ITEM_KEY;
9047 key.offset = (u64)-1;
9049 root = btrfs_read_fs_root(fs_info, &key);
9051 err |= REFERENCER_MISSING;
9055 /* Read out the tree block to get item/node key */
9056 eb = read_tree_block(root, bytenr, root->nodesize, 0);
9057 if (!extent_buffer_uptodate(eb)) {
9058 err |= REFERENCER_MISSING;
9059 free_extent_buffer(eb);
9063 /* Empty tree, no need to check key */
9064 if (!btrfs_header_nritems(eb) && !level) {
9065 free_extent_buffer(eb);
9070 btrfs_node_key_to_cpu(eb, &key, 0);
9072 btrfs_item_key_to_cpu(eb, &key, 0);
9074 free_extent_buffer(eb);
9076 btrfs_init_path(&path);
9077 path.lowest_level = level;
9078 /* Search with the first key, to ensure we can reach it */
9079 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
9081 err |= REFERENCER_MISSING;
9085 node = path.nodes[level];
9086 if (btrfs_header_bytenr(node) != bytenr) {
9088 "extent [%llu %d] referencer bytenr mismatch, wanted: %llu, have: %llu",
9089 bytenr, nodesize, bytenr,
9090 btrfs_header_bytenr(node));
9091 err |= REFERENCER_MISMATCH;
9093 if (btrfs_header_level(node) != level) {
9095 "extent [%llu %d] referencer level mismatch, wanted: %d, have: %d",
9096 bytenr, nodesize, level,
9097 btrfs_header_level(node));
9098 err |= REFERENCER_MISMATCH;
9102 btrfs_release_path(&path);
9104 if (err & REFERENCER_MISSING) {
9106 error("extent [%llu %d] lost referencer (owner: %llu)",
9107 bytenr, nodesize, root_id);
9110 "extent [%llu %d] lost referencer (owner: %llu, level: %u)",
9111 bytenr, nodesize, root_id, level);
9118 * Check referencer for shared block backref
9119 * If level == -1, this function will resolve the level.
9121 static int check_shared_block_backref(struct btrfs_fs_info *fs_info,
9122 u64 parent, u64 bytenr, int level)
9124 struct extent_buffer *eb;
9125 u32 nodesize = btrfs_super_nodesize(fs_info->super_copy);
9127 int found_parent = 0;
9130 eb = read_tree_block_fs_info(fs_info, parent, nodesize, 0);
9131 if (!extent_buffer_uptodate(eb))
9135 level = query_tree_block_level(fs_info, bytenr);
9139 if (level + 1 != btrfs_header_level(eb))
9142 nr = btrfs_header_nritems(eb);
9143 for (i = 0; i < nr; i++) {
9144 if (bytenr == btrfs_node_blockptr(eb, i)) {
9150 free_extent_buffer(eb);
9151 if (!found_parent) {
9153 "shared extent[%llu %u] lost its parent (parent: %llu, level: %u)",
9154 bytenr, nodesize, parent, level);
9155 return REFERENCER_MISSING;
9161 * Check referencer for normal (inlined) data ref
9162 * If len == 0, it will be resolved by searching in extent tree
9164 static int check_extent_data_backref(struct btrfs_fs_info *fs_info,
9165 u64 root_id, u64 objectid, u64 offset,
9166 u64 bytenr, u64 len, u32 count)
9168 struct btrfs_root *root;
9169 struct btrfs_root *extent_root = fs_info->extent_root;
9170 struct btrfs_key key;
9171 struct btrfs_path path;
9172 struct extent_buffer *leaf;
9173 struct btrfs_file_extent_item *fi;
9174 u32 found_count = 0;
9179 key.objectid = bytenr;
9180 key.type = BTRFS_EXTENT_ITEM_KEY;
9181 key.offset = (u64)-1;
9183 btrfs_init_path(&path);
9184 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
9187 ret = btrfs_previous_extent_item(extent_root, &path, bytenr);
9190 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
9191 if (key.objectid != bytenr ||
9192 key.type != BTRFS_EXTENT_ITEM_KEY)
9195 btrfs_release_path(&path);
9197 key.objectid = root_id;
9198 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
9199 key.offset = (u64)-1;
9200 btrfs_init_path(&path);
9202 root = btrfs_read_fs_root(fs_info, &key);
9206 key.objectid = objectid;
9207 key.type = BTRFS_EXTENT_DATA_KEY;
9209 * It can be nasty as data backref offset is
9210 * file offset - file extent offset, which is smaller or
9211 * equal to original backref offset. The only special case is
9212 * overflow. So we need to special check and do further search.
9214 key.offset = offset & (1ULL << 63) ? 0 : offset;
9216 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
9221 * Search afterwards to get correct one
9222 * NOTE: As we must do a comprehensive check on the data backref to
9223 * make sure the dref count also matches, we must iterate all file
9224 * extents for that inode.
9227 leaf = path.nodes[0];
9228 slot = path.slots[0];
9230 btrfs_item_key_to_cpu(leaf, &key, slot);
9231 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
9233 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
9235 * Except normal disk bytenr and disk num bytes, we still
9236 * need to do extra check on dbackref offset as
9237 * dbackref offset = file_offset - file_extent_offset
9239 if (btrfs_file_extent_disk_bytenr(leaf, fi) == bytenr &&
9240 btrfs_file_extent_disk_num_bytes(leaf, fi) == len &&
9241 (u64)(key.offset - btrfs_file_extent_offset(leaf, fi)) ==
9245 ret = btrfs_next_item(root, &path);
9250 btrfs_release_path(&path);
9251 if (found_count != count) {
9253 "extent[%llu, %llu] referencer count mismatch (root: %llu, owner: %llu, offset: %llu) wanted: %u, have: %u",
9254 bytenr, len, root_id, objectid, offset, count, found_count);
9255 return REFERENCER_MISSING;
9261 * Check if the referencer of a shared data backref exists
9263 static int check_shared_data_backref(struct btrfs_fs_info *fs_info,
9264 u64 parent, u64 bytenr)
9266 struct extent_buffer *eb;
9267 struct btrfs_key key;
9268 struct btrfs_file_extent_item *fi;
9269 u32 nodesize = btrfs_super_nodesize(fs_info->super_copy);
9271 int found_parent = 0;
9274 eb = read_tree_block_fs_info(fs_info, parent, nodesize, 0);
9275 if (!extent_buffer_uptodate(eb))
9278 nr = btrfs_header_nritems(eb);
9279 for (i = 0; i < nr; i++) {
9280 btrfs_item_key_to_cpu(eb, &key, i);
9281 if (key.type != BTRFS_EXTENT_DATA_KEY)
9284 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
9285 if (btrfs_file_extent_type(eb, fi) == BTRFS_FILE_EXTENT_INLINE)
9288 if (btrfs_file_extent_disk_bytenr(eb, fi) == bytenr) {
9295 free_extent_buffer(eb);
9296 if (!found_parent) {
9297 error("shared extent %llu referencer lost (parent: %llu)",
9299 return REFERENCER_MISSING;
9305 * This function will check a given extent item, including its backref and
9306 * itself (like crossing stripe boundary and type)
9308 * Since we don't use extent_record anymore, introduce new error bit
9310 static int check_extent_item(struct btrfs_fs_info *fs_info,
9311 struct extent_buffer *eb, int slot)
9313 struct btrfs_extent_item *ei;
9314 struct btrfs_extent_inline_ref *iref;
9315 struct btrfs_extent_data_ref *dref;
9319 u32 nodesize = btrfs_super_nodesize(fs_info->super_copy);
9320 u32 item_size = btrfs_item_size_nr(eb, slot);
9325 struct btrfs_key key;
9329 btrfs_item_key_to_cpu(eb, &key, slot);
9330 if (key.type == BTRFS_EXTENT_ITEM_KEY)
9331 bytes_used += key.offset;
9333 bytes_used += nodesize;
9335 if (item_size < sizeof(*ei)) {
9337 * COMPAT_EXTENT_TREE_V0 case, but it's already a super
9338 * old thing when on disk format is still un-determined.
9339 * No need to care about it anymore
9341 error("unsupported COMPAT_EXTENT_TREE_V0 detected");
9345 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
9346 flags = btrfs_extent_flags(eb, ei);
9348 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
9350 if (metadata && check_crossing_stripes(key.objectid, eb->len)) {
9351 error("bad metadata [%llu, %llu) crossing stripe boundary",
9352 key.objectid, key.objectid + nodesize);
9353 err |= CROSSING_STRIPE_BOUNDARY;
9356 ptr = (unsigned long)(ei + 1);
9358 if (metadata && key.type == BTRFS_EXTENT_ITEM_KEY) {
9359 /* Old EXTENT_ITEM metadata */
9360 struct btrfs_tree_block_info *info;
9362 info = (struct btrfs_tree_block_info *)ptr;
9363 level = btrfs_tree_block_level(eb, info);
9364 ptr += sizeof(struct btrfs_tree_block_info);
9366 /* New METADATA_ITEM */
9369 end = (unsigned long)ei + item_size;
9372 err |= ITEM_SIZE_MISMATCH;
9376 /* Now check every backref in this extent item */
9378 iref = (struct btrfs_extent_inline_ref *)ptr;
9379 type = btrfs_extent_inline_ref_type(eb, iref);
9380 offset = btrfs_extent_inline_ref_offset(eb, iref);
9382 case BTRFS_TREE_BLOCK_REF_KEY:
9383 ret = check_tree_block_backref(fs_info, offset, key.objectid,
9387 case BTRFS_SHARED_BLOCK_REF_KEY:
9388 ret = check_shared_block_backref(fs_info, offset, key.objectid,
9392 case BTRFS_EXTENT_DATA_REF_KEY:
9393 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
9394 ret = check_extent_data_backref(fs_info,
9395 btrfs_extent_data_ref_root(eb, dref),
9396 btrfs_extent_data_ref_objectid(eb, dref),
9397 btrfs_extent_data_ref_offset(eb, dref),
9398 key.objectid, key.offset,
9399 btrfs_extent_data_ref_count(eb, dref));
9402 case BTRFS_SHARED_DATA_REF_KEY:
9403 ret = check_shared_data_backref(fs_info, offset, key.objectid);
9407 error("extent[%llu %d %llu] has unknown ref type: %d",
9408 key.objectid, key.type, key.offset, type);
9409 err |= UNKNOWN_TYPE;
9413 ptr += btrfs_extent_inline_ref_size(type);
9422 * Check if a dev extent item is referred correctly by its chunk
9424 static int check_dev_extent_item(struct btrfs_fs_info *fs_info,
9425 struct extent_buffer *eb, int slot)
9427 struct btrfs_root *chunk_root = fs_info->chunk_root;
9428 struct btrfs_dev_extent *ptr;
9429 struct btrfs_path path;
9430 struct btrfs_key chunk_key;
9431 struct btrfs_key devext_key;
9432 struct btrfs_chunk *chunk;
9433 struct extent_buffer *l;
9437 int found_chunk = 0;
9440 btrfs_item_key_to_cpu(eb, &devext_key, slot);
9441 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_extent);
9442 length = btrfs_dev_extent_length(eb, ptr);
9444 chunk_key.objectid = btrfs_dev_extent_chunk_objectid(eb, ptr);
9445 chunk_key.type = BTRFS_CHUNK_ITEM_KEY;
9446 chunk_key.offset = btrfs_dev_extent_chunk_offset(eb, ptr);
9448 btrfs_init_path(&path);
9449 ret = btrfs_search_slot(NULL, chunk_root, &chunk_key, &path, 0, 0);
9454 chunk = btrfs_item_ptr(l, path.slots[0], struct btrfs_chunk);
9455 if (btrfs_chunk_length(l, chunk) != length)
9458 num_stripes = btrfs_chunk_num_stripes(l, chunk);
9459 for (i = 0; i < num_stripes; i++) {
9460 u64 devid = btrfs_stripe_devid_nr(l, chunk, i);
9461 u64 offset = btrfs_stripe_offset_nr(l, chunk, i);
9463 if (devid == devext_key.objectid &&
9464 offset == devext_key.offset) {
9470 btrfs_release_path(&path);
9473 "device extent[%llu, %llu, %llu] did not find the related chunk",
9474 devext_key.objectid, devext_key.offset, length);
9475 return REFERENCER_MISSING;
9481 * Check if the used space is correct with the dev item
9483 static int check_dev_item(struct btrfs_fs_info *fs_info,
9484 struct extent_buffer *eb, int slot)
9486 struct btrfs_root *dev_root = fs_info->dev_root;
9487 struct btrfs_dev_item *dev_item;
9488 struct btrfs_path path;
9489 struct btrfs_key key;
9490 struct btrfs_dev_extent *ptr;
9496 dev_item = btrfs_item_ptr(eb, slot, struct btrfs_dev_item);
9497 dev_id = btrfs_device_id(eb, dev_item);
9498 used = btrfs_device_bytes_used(eb, dev_item);
9500 key.objectid = dev_id;
9501 key.type = BTRFS_DEV_EXTENT_KEY;
9504 btrfs_init_path(&path);
9505 ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
9507 btrfs_item_key_to_cpu(eb, &key, slot);
9508 error("cannot find any related dev extent for dev[%llu, %u, %llu]",
9509 key.objectid, key.type, key.offset);
9510 btrfs_release_path(&path);
9511 return REFERENCER_MISSING;
9514 /* Iterate dev_extents to calculate the used space of a device */
9516 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
9518 if (key.objectid > dev_id)
9520 if (key.type != BTRFS_DEV_EXTENT_KEY || key.objectid != dev_id)
9523 ptr = btrfs_item_ptr(path.nodes[0], path.slots[0],
9524 struct btrfs_dev_extent);
9525 total += btrfs_dev_extent_length(path.nodes[0], ptr);
9527 ret = btrfs_next_item(dev_root, &path);
9531 btrfs_release_path(&path);
9533 if (used != total) {
9534 btrfs_item_key_to_cpu(eb, &key, slot);
9536 "Dev extent's total-byte %llu is not equal to bytes-used %llu in dev[%llu, %u, %llu]",
9537 total, used, BTRFS_ROOT_TREE_OBJECTID,
9538 BTRFS_DEV_EXTENT_KEY, dev_id);
9539 return ACCOUNTING_MISMATCH;
9545 * Check a block group item with its referener (chunk) and its used space
9546 * with extent/metadata item
9548 static int check_block_group_item(struct btrfs_fs_info *fs_info,
9549 struct extent_buffer *eb, int slot)
9551 struct btrfs_root *extent_root = fs_info->extent_root;
9552 struct btrfs_root *chunk_root = fs_info->chunk_root;
9553 struct btrfs_block_group_item *bi;
9554 struct btrfs_block_group_item bg_item;
9555 struct btrfs_path path;
9556 struct btrfs_key bg_key;
9557 struct btrfs_key chunk_key;
9558 struct btrfs_key extent_key;
9559 struct btrfs_chunk *chunk;
9560 struct extent_buffer *leaf;
9561 struct btrfs_extent_item *ei;
9562 u32 nodesize = btrfs_super_nodesize(fs_info->super_copy);
9570 btrfs_item_key_to_cpu(eb, &bg_key, slot);
9571 bi = btrfs_item_ptr(eb, slot, struct btrfs_block_group_item);
9572 read_extent_buffer(eb, &bg_item, (unsigned long)bi, sizeof(bg_item));
9573 used = btrfs_block_group_used(&bg_item);
9574 bg_flags = btrfs_block_group_flags(&bg_item);
9576 chunk_key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
9577 chunk_key.type = BTRFS_CHUNK_ITEM_KEY;
9578 chunk_key.offset = bg_key.objectid;
9580 btrfs_init_path(&path);
9581 /* Search for the referencer chunk */
9582 ret = btrfs_search_slot(NULL, chunk_root, &chunk_key, &path, 0, 0);
9585 "block group[%llu %llu] did not find the related chunk item",
9586 bg_key.objectid, bg_key.offset);
9587 err |= REFERENCER_MISSING;
9589 chunk = btrfs_item_ptr(path.nodes[0], path.slots[0],
9590 struct btrfs_chunk);
9591 if (btrfs_chunk_length(path.nodes[0], chunk) !=
9594 "block group[%llu %llu] related chunk item length does not match",
9595 bg_key.objectid, bg_key.offset);
9596 err |= REFERENCER_MISMATCH;
9599 btrfs_release_path(&path);
9601 /* Search from the block group bytenr */
9602 extent_key.objectid = bg_key.objectid;
9603 extent_key.type = 0;
9604 extent_key.offset = 0;
9606 btrfs_init_path(&path);
9607 ret = btrfs_search_slot(NULL, extent_root, &extent_key, &path, 0, 0);
9611 /* Iterate extent tree to account used space */
9613 leaf = path.nodes[0];
9614 btrfs_item_key_to_cpu(leaf, &extent_key, path.slots[0]);
9615 if (extent_key.objectid >= bg_key.objectid + bg_key.offset)
9618 if (extent_key.type != BTRFS_METADATA_ITEM_KEY &&
9619 extent_key.type != BTRFS_EXTENT_ITEM_KEY)
9621 if (extent_key.objectid < bg_key.objectid)
9624 if (extent_key.type == BTRFS_METADATA_ITEM_KEY)
9627 total += extent_key.offset;
9629 ei = btrfs_item_ptr(leaf, path.slots[0],
9630 struct btrfs_extent_item);
9631 flags = btrfs_extent_flags(leaf, ei);
9632 if (flags & BTRFS_EXTENT_FLAG_DATA) {
9633 if (!(bg_flags & BTRFS_BLOCK_GROUP_DATA)) {
9635 "bad extent[%llu, %llu) type mismatch with chunk",
9636 extent_key.objectid,
9637 extent_key.objectid + extent_key.offset);
9638 err |= CHUNK_TYPE_MISMATCH;
9640 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
9641 if (!(bg_flags & (BTRFS_BLOCK_GROUP_SYSTEM |
9642 BTRFS_BLOCK_GROUP_METADATA))) {
9644 "bad extent[%llu, %llu) type mismatch with chunk",
9645 extent_key.objectid,
9646 extent_key.objectid + nodesize);
9647 err |= CHUNK_TYPE_MISMATCH;
9651 ret = btrfs_next_item(extent_root, &path);
9657 btrfs_release_path(&path);
9659 if (total != used) {
9661 "block group[%llu %llu] used %llu but extent items used %llu",
9662 bg_key.objectid, bg_key.offset, used, total);
9663 err |= ACCOUNTING_MISMATCH;
9669 * Check a chunk item.
9670 * Including checking all referred dev_extents and block group
9672 static int check_chunk_item(struct btrfs_fs_info *fs_info,
9673 struct extent_buffer *eb, int slot)
9675 struct btrfs_root *extent_root = fs_info->extent_root;
9676 struct btrfs_root *dev_root = fs_info->dev_root;
9677 struct btrfs_path path;
9678 struct btrfs_key chunk_key;
9679 struct btrfs_key bg_key;
9680 struct btrfs_key devext_key;
9681 struct btrfs_chunk *chunk;
9682 struct extent_buffer *leaf;
9683 struct btrfs_block_group_item *bi;
9684 struct btrfs_block_group_item bg_item;
9685 struct btrfs_dev_extent *ptr;
9686 u32 sectorsize = btrfs_super_sectorsize(fs_info->super_copy);
9698 btrfs_item_key_to_cpu(eb, &chunk_key, slot);
9699 chunk = btrfs_item_ptr(eb, slot, struct btrfs_chunk);
9700 length = btrfs_chunk_length(eb, chunk);
9701 chunk_end = chunk_key.offset + length;
9702 if (!IS_ALIGNED(length, sectorsize)) {
9703 error("chunk[%llu %llu) not aligned to %u",
9704 chunk_key.offset, chunk_end, sectorsize);
9705 err |= BYTES_UNALIGNED;
9709 type = btrfs_chunk_type(eb, chunk);
9710 profile = type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
9711 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
9712 error("chunk[%llu %llu) has no chunk type",
9713 chunk_key.offset, chunk_end);
9714 err |= UNKNOWN_TYPE;
9716 if (profile && (profile & (profile - 1))) {
9717 error("chunk[%llu %llu) multiple profiles detected: %llx",
9718 chunk_key.offset, chunk_end, profile);
9719 err |= UNKNOWN_TYPE;
9722 bg_key.objectid = chunk_key.offset;
9723 bg_key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9724 bg_key.offset = length;
9726 btrfs_init_path(&path);
9727 ret = btrfs_search_slot(NULL, extent_root, &bg_key, &path, 0, 0);
9730 "chunk[%llu %llu) did not find the related block group item",
9731 chunk_key.offset, chunk_end);
9732 err |= REFERENCER_MISSING;
9734 leaf = path.nodes[0];
9735 bi = btrfs_item_ptr(leaf, path.slots[0],
9736 struct btrfs_block_group_item);
9737 read_extent_buffer(leaf, &bg_item, (unsigned long)bi,
9739 if (btrfs_block_group_flags(&bg_item) != type) {
9741 "chunk[%llu %llu) related block group item flags mismatch, wanted: %llu, have: %llu",
9742 chunk_key.offset, chunk_end, type,
9743 btrfs_block_group_flags(&bg_item));
9744 err |= REFERENCER_MISSING;
9748 num_stripes = btrfs_chunk_num_stripes(eb, chunk);
9749 for (i = 0; i < num_stripes; i++) {
9750 btrfs_release_path(&path);
9751 btrfs_init_path(&path);
9752 devext_key.objectid = btrfs_stripe_devid_nr(eb, chunk, i);
9753 devext_key.type = BTRFS_DEV_EXTENT_KEY;
9754 devext_key.offset = btrfs_stripe_offset_nr(eb, chunk, i);
9756 ret = btrfs_search_slot(NULL, dev_root, &devext_key, &path,
9761 leaf = path.nodes[0];
9762 ptr = btrfs_item_ptr(leaf, path.slots[0],
9763 struct btrfs_dev_extent);
9764 objectid = btrfs_dev_extent_chunk_objectid(leaf, ptr);
9765 offset = btrfs_dev_extent_chunk_offset(leaf, ptr);
9766 if (objectid != chunk_key.objectid ||
9767 offset != chunk_key.offset ||
9768 btrfs_dev_extent_length(leaf, ptr) != length)
9772 err |= BACKREF_MISSING;
9774 "chunk[%llu %llu) stripe %d did not find the related dev extent",
9775 chunk_key.objectid, chunk_end, i);
9778 btrfs_release_path(&path);
9784 * Main entry function to check known items and update related accounting info
9786 static int check_leaf_items(struct btrfs_root *root, struct extent_buffer *eb)
9788 struct btrfs_fs_info *fs_info = root->fs_info;
9789 struct btrfs_key key;
9792 struct btrfs_extent_data_ref *dref;
9797 btrfs_item_key_to_cpu(eb, &key, slot);
9798 type = btrfs_key_type(&key);
9801 case BTRFS_EXTENT_DATA_KEY:
9802 ret = check_extent_data_item(root, eb, slot);
9805 case BTRFS_BLOCK_GROUP_ITEM_KEY:
9806 ret = check_block_group_item(fs_info, eb, slot);
9809 case BTRFS_DEV_ITEM_KEY:
9810 ret = check_dev_item(fs_info, eb, slot);
9813 case BTRFS_CHUNK_ITEM_KEY:
9814 ret = check_chunk_item(fs_info, eb, slot);
9817 case BTRFS_DEV_EXTENT_KEY:
9818 ret = check_dev_extent_item(fs_info, eb, slot);
9821 case BTRFS_EXTENT_ITEM_KEY:
9822 case BTRFS_METADATA_ITEM_KEY:
9823 ret = check_extent_item(fs_info, eb, slot);
9826 case BTRFS_EXTENT_CSUM_KEY:
9827 total_csum_bytes += btrfs_item_size_nr(eb, slot);
9829 case BTRFS_TREE_BLOCK_REF_KEY:
9830 ret = check_tree_block_backref(fs_info, key.offset,
9834 case BTRFS_EXTENT_DATA_REF_KEY:
9835 dref = btrfs_item_ptr(eb, slot, struct btrfs_extent_data_ref);
9836 ret = check_extent_data_backref(fs_info,
9837 btrfs_extent_data_ref_root(eb, dref),
9838 btrfs_extent_data_ref_objectid(eb, dref),
9839 btrfs_extent_data_ref_offset(eb, dref),
9841 btrfs_extent_data_ref_count(eb, dref));
9844 case BTRFS_SHARED_BLOCK_REF_KEY:
9845 ret = check_shared_block_backref(fs_info, key.offset,
9849 case BTRFS_SHARED_DATA_REF_KEY:
9850 ret = check_shared_data_backref(fs_info, key.offset,
9858 if (++slot < btrfs_header_nritems(eb))
9865 * Helper function for later fs/subvol tree check. To determine if a tree
9866 * block should be checked.
9867 * This function will ensure only the direct referencer with lowest rootid to
9868 * check a fs/subvolume tree block.
9870 * Backref check at extent tree would detect errors like missing subvolume
9871 * tree, so we can do aggressive check to reduce duplicated checks.
9873 static int should_check(struct btrfs_root *root, struct extent_buffer *eb)
9875 struct btrfs_root *extent_root = root->fs_info->extent_root;
9876 struct btrfs_key key;
9877 struct btrfs_path path;
9878 struct extent_buffer *leaf;
9880 struct btrfs_extent_item *ei;
9886 struct btrfs_extent_inline_ref *iref;
9889 btrfs_init_path(&path);
9890 key.objectid = btrfs_header_bytenr(eb);
9891 key.type = BTRFS_METADATA_ITEM_KEY;
9892 key.offset = (u64)-1;
9895 * Any failure in backref resolving means we can't determine
9896 * whom the tree block belongs to.
9897 * So in that case, we need to check that tree block
9899 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
9903 ret = btrfs_previous_extent_item(extent_root, &path,
9904 btrfs_header_bytenr(eb));
9908 leaf = path.nodes[0];
9909 slot = path.slots[0];
9910 btrfs_item_key_to_cpu(leaf, &key, slot);
9911 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
9913 if (key.type == BTRFS_METADATA_ITEM_KEY) {
9914 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
9916 struct btrfs_tree_block_info *info;
9918 info = (struct btrfs_tree_block_info *)(ei + 1);
9919 iref = (struct btrfs_extent_inline_ref *)(info + 1);
9922 item_size = btrfs_item_size_nr(leaf, slot);
9923 ptr = (unsigned long)iref;
9924 end = (unsigned long)ei + item_size;
9926 iref = (struct btrfs_extent_inline_ref *)ptr;
9927 type = btrfs_extent_inline_ref_type(leaf, iref);
9928 offset = btrfs_extent_inline_ref_offset(leaf, iref);
9931 * We only check the tree block if current root is
9932 * the lowest referencer of it.
9934 if (type == BTRFS_TREE_BLOCK_REF_KEY &&
9935 offset < root->objectid) {
9936 btrfs_release_path(&path);
9940 ptr += btrfs_extent_inline_ref_size(type);
9943 * Normally we should also check keyed tree block ref, but that may be
9944 * very time consuming. Inlined ref should already make us skip a lot
9945 * of refs now. So skip search keyed tree block ref.
9949 btrfs_release_path(&path);
9954 * Traversal function for tree block. We will do:
9955 * 1) Skip shared fs/subvolume tree blocks
9956 * 2) Update related bytes accounting
9957 * 3) Pre-order traversal
9959 static int traverse_tree_block(struct btrfs_root *root,
9960 struct extent_buffer *node)
9962 struct extent_buffer *eb;
9963 struct btrfs_key key;
9964 struct btrfs_key drop_key;
9972 * Skip shared fs/subvolume tree block, in that case they will
9973 * be checked by referencer with lowest rootid
9975 if (is_fstree(root->objectid) && !should_check(root, node))
9978 /* Update bytes accounting */
9979 total_btree_bytes += node->len;
9980 if (fs_root_objectid(btrfs_header_owner(node)))
9981 total_fs_tree_bytes += node->len;
9982 if (btrfs_header_owner(node) == BTRFS_EXTENT_TREE_OBJECTID)
9983 total_extent_tree_bytes += node->len;
9984 if (!found_old_backref &&
9985 btrfs_header_owner(node) == BTRFS_TREE_RELOC_OBJECTID &&
9986 btrfs_header_backref_rev(node) == BTRFS_MIXED_BACKREF_REV &&
9987 !btrfs_header_flag(node, BTRFS_HEADER_FLAG_RELOC))
9988 found_old_backref = 1;
9990 /* pre-order tranversal, check itself first */
9991 level = btrfs_header_level(node);
9992 ret = check_tree_block_ref(root, node, btrfs_header_bytenr(node),
9993 btrfs_header_level(node),
9994 btrfs_header_owner(node));
9998 "check %s failed root %llu bytenr %llu level %d, force continue check",
9999 level ? "node":"leaf", root->objectid,
10000 btrfs_header_bytenr(node), btrfs_header_level(node));
10003 btree_space_waste += btrfs_leaf_free_space(root, node);
10004 ret = check_leaf_items(root, node);
10009 nr = btrfs_header_nritems(node);
10010 btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress);
10011 btree_space_waste += (BTRFS_NODEPTRS_PER_BLOCK(root) - nr) *
10012 sizeof(struct btrfs_key_ptr);
10014 /* Then check all its children */
10015 for (i = 0; i < nr; i++) {
10016 u64 blocknr = btrfs_node_blockptr(node, i);
10018 btrfs_node_key_to_cpu(node, &key, i);
10019 if (level == root->root_item.drop_level &&
10020 is_dropped_key(&key, &drop_key))
10024 * As a btrfs tree has most 8 levels (0..7), so it's quite safe
10025 * to call the function itself.
10027 eb = read_tree_block(root, blocknr, root->nodesize, 0);
10028 if (extent_buffer_uptodate(eb)) {
10029 ret = traverse_tree_block(root, eb);
10032 free_extent_buffer(eb);
10039 * Low memory usage version check_chunks_and_extents.
10041 static int check_chunks_and_extents_v2(struct btrfs_root *root)
10043 struct btrfs_path path;
10044 struct btrfs_key key;
10045 struct btrfs_root *root1;
10046 struct btrfs_root *cur_root;
10050 root1 = root->fs_info->chunk_root;
10051 ret = traverse_tree_block(root1, root1->node);
10054 root1 = root->fs_info->tree_root;
10055 ret = traverse_tree_block(root1, root1->node);
10058 btrfs_init_path(&path);
10059 key.objectid = BTRFS_EXTENT_TREE_OBJECTID;
10061 key.type = BTRFS_ROOT_ITEM_KEY;
10063 ret = btrfs_search_slot(NULL, root1, &key, &path, 0, 0);
10065 error("cannot find extent treet in tree_root");
10070 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
10071 if (key.type != BTRFS_ROOT_ITEM_KEY)
10073 key.offset = (u64)-1;
10075 cur_root = btrfs_read_fs_root(root->fs_info, &key);
10076 if (IS_ERR(cur_root) || !cur_root) {
10077 error("failed to read tree: %lld", key.objectid);
10081 ret = traverse_tree_block(cur_root, cur_root->node);
10085 ret = btrfs_next_item(root1, &path);
10091 btrfs_release_path(&path);
10095 static int btrfs_fsck_reinit_root(struct btrfs_trans_handle *trans,
10096 struct btrfs_root *root, int overwrite)
10098 struct extent_buffer *c;
10099 struct extent_buffer *old = root->node;
10102 struct btrfs_disk_key disk_key = {0,0,0};
10108 extent_buffer_get(c);
10111 c = btrfs_alloc_free_block(trans, root,
10113 root->root_key.objectid,
10114 &disk_key, level, 0, 0);
10117 extent_buffer_get(c);
10121 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
10122 btrfs_set_header_level(c, level);
10123 btrfs_set_header_bytenr(c, c->start);
10124 btrfs_set_header_generation(c, trans->transid);
10125 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
10126 btrfs_set_header_owner(c, root->root_key.objectid);
10128 write_extent_buffer(c, root->fs_info->fsid,
10129 btrfs_header_fsid(), BTRFS_FSID_SIZE);
10131 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
10132 btrfs_header_chunk_tree_uuid(c),
10135 btrfs_mark_buffer_dirty(c);
10137 * this case can happen in the following case:
10139 * 1.overwrite previous root.
10141 * 2.reinit reloc data root, this is because we skip pin
10142 * down reloc data tree before which means we can allocate
10143 * same block bytenr here.
10145 if (old->start == c->start) {
10146 btrfs_set_root_generation(&root->root_item,
10148 root->root_item.level = btrfs_header_level(root->node);
10149 ret = btrfs_update_root(trans, root->fs_info->tree_root,
10150 &root->root_key, &root->root_item);
10152 free_extent_buffer(c);
10156 free_extent_buffer(old);
10158 add_root_to_dirty_list(root);
10162 static int pin_down_tree_blocks(struct btrfs_fs_info *fs_info,
10163 struct extent_buffer *eb, int tree_root)
10165 struct extent_buffer *tmp;
10166 struct btrfs_root_item *ri;
10167 struct btrfs_key key;
10170 int level = btrfs_header_level(eb);
10176 * If we have pinned this block before, don't pin it again.
10177 * This can not only avoid forever loop with broken filesystem
10178 * but also give us some speedups.
10180 if (test_range_bit(&fs_info->pinned_extents, eb->start,
10181 eb->start + eb->len - 1, EXTENT_DIRTY, 0))
10184 btrfs_pin_extent(fs_info, eb->start, eb->len);
10186 nodesize = btrfs_super_nodesize(fs_info->super_copy);
10187 nritems = btrfs_header_nritems(eb);
10188 for (i = 0; i < nritems; i++) {
10190 btrfs_item_key_to_cpu(eb, &key, i);
10191 if (key.type != BTRFS_ROOT_ITEM_KEY)
10193 /* Skip the extent root and reloc roots */
10194 if (key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
10195 key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
10196 key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
10198 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
10199 bytenr = btrfs_disk_root_bytenr(eb, ri);
10202 * If at any point we start needing the real root we
10203 * will have to build a stump root for the root we are
10204 * in, but for now this doesn't actually use the root so
10205 * just pass in extent_root.
10207 tmp = read_tree_block(fs_info->extent_root, bytenr,
10209 if (!extent_buffer_uptodate(tmp)) {
10210 fprintf(stderr, "Error reading root block\n");
10213 ret = pin_down_tree_blocks(fs_info, tmp, 0);
10214 free_extent_buffer(tmp);
10218 bytenr = btrfs_node_blockptr(eb, i);
10220 /* If we aren't the tree root don't read the block */
10221 if (level == 1 && !tree_root) {
10222 btrfs_pin_extent(fs_info, bytenr, nodesize);
10226 tmp = read_tree_block(fs_info->extent_root, bytenr,
10228 if (!extent_buffer_uptodate(tmp)) {
10229 fprintf(stderr, "Error reading tree block\n");
10232 ret = pin_down_tree_blocks(fs_info, tmp, tree_root);
10233 free_extent_buffer(tmp);
10242 static int pin_metadata_blocks(struct btrfs_fs_info *fs_info)
10246 ret = pin_down_tree_blocks(fs_info, fs_info->chunk_root->node, 0);
10250 return pin_down_tree_blocks(fs_info, fs_info->tree_root->node, 1);
10253 static int reset_block_groups(struct btrfs_fs_info *fs_info)
10255 struct btrfs_block_group_cache *cache;
10256 struct btrfs_path *path;
10257 struct extent_buffer *leaf;
10258 struct btrfs_chunk *chunk;
10259 struct btrfs_key key;
10263 path = btrfs_alloc_path();
10268 key.type = BTRFS_CHUNK_ITEM_KEY;
10271 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
10273 btrfs_free_path(path);
10278 * We do this in case the block groups were screwed up and had alloc
10279 * bits that aren't actually set on the chunks. This happens with
10280 * restored images every time and could happen in real life I guess.
10282 fs_info->avail_data_alloc_bits = 0;
10283 fs_info->avail_metadata_alloc_bits = 0;
10284 fs_info->avail_system_alloc_bits = 0;
10286 /* First we need to create the in-memory block groups */
10288 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
10289 ret = btrfs_next_leaf(fs_info->chunk_root, path);
10291 btrfs_free_path(path);
10299 leaf = path->nodes[0];
10300 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
10301 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
10306 chunk = btrfs_item_ptr(leaf, path->slots[0],
10307 struct btrfs_chunk);
10308 btrfs_add_block_group(fs_info, 0,
10309 btrfs_chunk_type(leaf, chunk),
10310 key.objectid, key.offset,
10311 btrfs_chunk_length(leaf, chunk));
10312 set_extent_dirty(&fs_info->free_space_cache, key.offset,
10313 key.offset + btrfs_chunk_length(leaf, chunk),
10319 cache = btrfs_lookup_first_block_group(fs_info, start);
10323 start = cache->key.objectid + cache->key.offset;
10326 btrfs_free_path(path);
10330 static int reset_balance(struct btrfs_trans_handle *trans,
10331 struct btrfs_fs_info *fs_info)
10333 struct btrfs_root *root = fs_info->tree_root;
10334 struct btrfs_path *path;
10335 struct extent_buffer *leaf;
10336 struct btrfs_key key;
10337 int del_slot, del_nr = 0;
10341 path = btrfs_alloc_path();
10345 key.objectid = BTRFS_BALANCE_OBJECTID;
10346 key.type = BTRFS_BALANCE_ITEM_KEY;
10349 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10354 goto reinit_data_reloc;
10359 ret = btrfs_del_item(trans, root, path);
10362 btrfs_release_path(path);
10364 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
10365 key.type = BTRFS_ROOT_ITEM_KEY;
10368 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10372 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
10377 ret = btrfs_del_items(trans, root, path,
10384 btrfs_release_path(path);
10387 ret = btrfs_search_slot(trans, root, &key, path,
10394 leaf = path->nodes[0];
10395 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
10396 if (key.objectid > BTRFS_TREE_RELOC_OBJECTID)
10398 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
10403 del_slot = path->slots[0];
10412 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
10416 btrfs_release_path(path);
10419 key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
10420 key.type = BTRFS_ROOT_ITEM_KEY;
10421 key.offset = (u64)-1;
10422 root = btrfs_read_fs_root(fs_info, &key);
10423 if (IS_ERR(root)) {
10424 fprintf(stderr, "Error reading data reloc tree\n");
10425 ret = PTR_ERR(root);
10428 record_root_in_trans(trans, root);
10429 ret = btrfs_fsck_reinit_root(trans, root, 0);
10432 ret = btrfs_make_root_dir(trans, root, BTRFS_FIRST_FREE_OBJECTID);
10434 btrfs_free_path(path);
10438 static int reinit_extent_tree(struct btrfs_trans_handle *trans,
10439 struct btrfs_fs_info *fs_info)
10445 * The only reason we don't do this is because right now we're just
10446 * walking the trees we find and pinning down their bytes, we don't look
10447 * at any of the leaves. In order to do mixed groups we'd have to check
10448 * the leaves of any fs roots and pin down the bytes for any file
10449 * extents we find. Not hard but why do it if we don't have to?
10451 if (btrfs_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)) {
10452 fprintf(stderr, "We don't support re-initing the extent tree "
10453 "for mixed block groups yet, please notify a btrfs "
10454 "developer you want to do this so they can add this "
10455 "functionality.\n");
10460 * first we need to walk all of the trees except the extent tree and pin
10461 * down the bytes that are in use so we don't overwrite any existing
10464 ret = pin_metadata_blocks(fs_info);
10466 fprintf(stderr, "error pinning down used bytes\n");
10471 * Need to drop all the block groups since we're going to recreate all
10474 btrfs_free_block_groups(fs_info);
10475 ret = reset_block_groups(fs_info);
10477 fprintf(stderr, "error resetting the block groups\n");
10481 /* Ok we can allocate now, reinit the extent root */
10482 ret = btrfs_fsck_reinit_root(trans, fs_info->extent_root, 0);
10484 fprintf(stderr, "extent root initialization failed\n");
10486 * When the transaction code is updated we should end the
10487 * transaction, but for now progs only knows about commit so
10488 * just return an error.
10494 * Now we have all the in-memory block groups setup so we can make
10495 * allocations properly, and the metadata we care about is safe since we
10496 * pinned all of it above.
10499 struct btrfs_block_group_cache *cache;
10501 cache = btrfs_lookup_first_block_group(fs_info, start);
10504 start = cache->key.objectid + cache->key.offset;
10505 ret = btrfs_insert_item(trans, fs_info->extent_root,
10506 &cache->key, &cache->item,
10507 sizeof(cache->item));
10509 fprintf(stderr, "Error adding block group\n");
10512 btrfs_extent_post_op(trans, fs_info->extent_root);
10515 ret = reset_balance(trans, fs_info);
10517 fprintf(stderr, "error resetting the pending balance\n");
10522 static int recow_extent_buffer(struct btrfs_root *root, struct extent_buffer *eb)
10524 struct btrfs_path *path;
10525 struct btrfs_trans_handle *trans;
10526 struct btrfs_key key;
10529 printf("Recowing metadata block %llu\n", eb->start);
10530 key.objectid = btrfs_header_owner(eb);
10531 key.type = BTRFS_ROOT_ITEM_KEY;
10532 key.offset = (u64)-1;
10534 root = btrfs_read_fs_root(root->fs_info, &key);
10535 if (IS_ERR(root)) {
10536 fprintf(stderr, "Couldn't find owner root %llu\n",
10538 return PTR_ERR(root);
10541 path = btrfs_alloc_path();
10545 trans = btrfs_start_transaction(root, 1);
10546 if (IS_ERR(trans)) {
10547 btrfs_free_path(path);
10548 return PTR_ERR(trans);
10551 path->lowest_level = btrfs_header_level(eb);
10552 if (path->lowest_level)
10553 btrfs_node_key_to_cpu(eb, &key, 0);
10555 btrfs_item_key_to_cpu(eb, &key, 0);
10557 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
10558 btrfs_commit_transaction(trans, root);
10559 btrfs_free_path(path);
10563 static int delete_bad_item(struct btrfs_root *root, struct bad_item *bad)
10565 struct btrfs_path *path;
10566 struct btrfs_trans_handle *trans;
10567 struct btrfs_key key;
10570 printf("Deleting bad item [%llu,%u,%llu]\n", bad->key.objectid,
10571 bad->key.type, bad->key.offset);
10572 key.objectid = bad->root_id;
10573 key.type = BTRFS_ROOT_ITEM_KEY;
10574 key.offset = (u64)-1;
10576 root = btrfs_read_fs_root(root->fs_info, &key);
10577 if (IS_ERR(root)) {
10578 fprintf(stderr, "Couldn't find owner root %llu\n",
10580 return PTR_ERR(root);
10583 path = btrfs_alloc_path();
10587 trans = btrfs_start_transaction(root, 1);
10588 if (IS_ERR(trans)) {
10589 btrfs_free_path(path);
10590 return PTR_ERR(trans);
10593 ret = btrfs_search_slot(trans, root, &bad->key, path, -1, 1);
10599 ret = btrfs_del_item(trans, root, path);
10601 btrfs_commit_transaction(trans, root);
10602 btrfs_free_path(path);
10606 static int zero_log_tree(struct btrfs_root *root)
10608 struct btrfs_trans_handle *trans;
10611 trans = btrfs_start_transaction(root, 1);
10612 if (IS_ERR(trans)) {
10613 ret = PTR_ERR(trans);
10616 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
10617 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
10618 ret = btrfs_commit_transaction(trans, root);
10622 static int populate_csum(struct btrfs_trans_handle *trans,
10623 struct btrfs_root *csum_root, char *buf, u64 start,
10630 while (offset < len) {
10631 sectorsize = csum_root->sectorsize;
10632 ret = read_extent_data(csum_root, buf, start + offset,
10636 ret = btrfs_csum_file_block(trans, csum_root, start + len,
10637 start + offset, buf, sectorsize);
10640 offset += sectorsize;
10645 static int fill_csum_tree_from_one_fs_root(struct btrfs_trans_handle *trans,
10646 struct btrfs_root *csum_root,
10647 struct btrfs_root *cur_root)
10649 struct btrfs_path *path;
10650 struct btrfs_key key;
10651 struct extent_buffer *node;
10652 struct btrfs_file_extent_item *fi;
10659 path = btrfs_alloc_path();
10662 buf = malloc(cur_root->fs_info->csum_root->sectorsize);
10672 ret = btrfs_search_slot(NULL, cur_root, &key, path, 0, 0);
10675 /* Iterate all regular file extents and fill its csum */
10677 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
10679 if (key.type != BTRFS_EXTENT_DATA_KEY)
10681 node = path->nodes[0];
10682 slot = path->slots[0];
10683 fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
10684 if (btrfs_file_extent_type(node, fi) != BTRFS_FILE_EXTENT_REG)
10686 start = btrfs_file_extent_disk_bytenr(node, fi);
10687 len = btrfs_file_extent_disk_num_bytes(node, fi);
10689 ret = populate_csum(trans, csum_root, buf, start, len);
10690 if (ret == -EEXIST)
10696 * TODO: if next leaf is corrupted, jump to nearest next valid
10699 ret = btrfs_next_item(cur_root, path);
10709 btrfs_free_path(path);
10714 static int fill_csum_tree_from_fs(struct btrfs_trans_handle *trans,
10715 struct btrfs_root *csum_root)
10717 struct btrfs_fs_info *fs_info = csum_root->fs_info;
10718 struct btrfs_path *path;
10719 struct btrfs_root *tree_root = fs_info->tree_root;
10720 struct btrfs_root *cur_root;
10721 struct extent_buffer *node;
10722 struct btrfs_key key;
10726 path = btrfs_alloc_path();
10730 key.objectid = BTRFS_FS_TREE_OBJECTID;
10732 key.type = BTRFS_ROOT_ITEM_KEY;
10734 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
10743 node = path->nodes[0];
10744 slot = path->slots[0];
10745 btrfs_item_key_to_cpu(node, &key, slot);
10746 if (key.objectid > BTRFS_LAST_FREE_OBJECTID)
10748 if (key.type != BTRFS_ROOT_ITEM_KEY)
10750 if (!is_fstree(key.objectid))
10752 key.offset = (u64)-1;
10754 cur_root = btrfs_read_fs_root(fs_info, &key);
10755 if (IS_ERR(cur_root) || !cur_root) {
10756 fprintf(stderr, "Fail to read fs/subvol tree: %lld\n",
10760 ret = fill_csum_tree_from_one_fs_root(trans, csum_root,
10765 ret = btrfs_next_item(tree_root, path);
10775 btrfs_free_path(path);
10779 static int fill_csum_tree_from_extent(struct btrfs_trans_handle *trans,
10780 struct btrfs_root *csum_root)
10782 struct btrfs_root *extent_root = csum_root->fs_info->extent_root;
10783 struct btrfs_path *path;
10784 struct btrfs_extent_item *ei;
10785 struct extent_buffer *leaf;
10787 struct btrfs_key key;
10790 path = btrfs_alloc_path();
10795 key.type = BTRFS_EXTENT_ITEM_KEY;
10798 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
10800 btrfs_free_path(path);
10804 buf = malloc(csum_root->sectorsize);
10806 btrfs_free_path(path);
10811 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
10812 ret = btrfs_next_leaf(extent_root, path);
10820 leaf = path->nodes[0];
10822 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
10823 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
10828 ei = btrfs_item_ptr(leaf, path->slots[0],
10829 struct btrfs_extent_item);
10830 if (!(btrfs_extent_flags(leaf, ei) &
10831 BTRFS_EXTENT_FLAG_DATA)) {
10836 ret = populate_csum(trans, csum_root, buf, key.objectid,
10843 btrfs_free_path(path);
10849 * Recalculate the csum and put it into the csum tree.
10851 * Extent tree init will wipe out all the extent info, so in that case, we
10852 * can't depend on extent tree, but use fs tree. If search_fs_tree is set, we
10853 * will use fs/subvol trees to init the csum tree.
10855 static int fill_csum_tree(struct btrfs_trans_handle *trans,
10856 struct btrfs_root *csum_root,
10857 int search_fs_tree)
10859 if (search_fs_tree)
10860 return fill_csum_tree_from_fs(trans, csum_root);
10862 return fill_csum_tree_from_extent(trans, csum_root);
10865 static void free_roots_info_cache(void)
10867 if (!roots_info_cache)
10870 while (!cache_tree_empty(roots_info_cache)) {
10871 struct cache_extent *entry;
10872 struct root_item_info *rii;
10874 entry = first_cache_extent(roots_info_cache);
10877 remove_cache_extent(roots_info_cache, entry);
10878 rii = container_of(entry, struct root_item_info, cache_extent);
10882 free(roots_info_cache);
10883 roots_info_cache = NULL;
10886 static int build_roots_info_cache(struct btrfs_fs_info *info)
10889 struct btrfs_key key;
10890 struct extent_buffer *leaf;
10891 struct btrfs_path *path;
10893 if (!roots_info_cache) {
10894 roots_info_cache = malloc(sizeof(*roots_info_cache));
10895 if (!roots_info_cache)
10897 cache_tree_init(roots_info_cache);
10900 path = btrfs_alloc_path();
10905 key.type = BTRFS_EXTENT_ITEM_KEY;
10908 ret = btrfs_search_slot(NULL, info->extent_root, &key, path, 0, 0);
10911 leaf = path->nodes[0];
10914 struct btrfs_key found_key;
10915 struct btrfs_extent_item *ei;
10916 struct btrfs_extent_inline_ref *iref;
10917 int slot = path->slots[0];
10922 struct cache_extent *entry;
10923 struct root_item_info *rii;
10925 if (slot >= btrfs_header_nritems(leaf)) {
10926 ret = btrfs_next_leaf(info->extent_root, path);
10933 leaf = path->nodes[0];
10934 slot = path->slots[0];
10937 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
10939 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
10940 found_key.type != BTRFS_METADATA_ITEM_KEY)
10943 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
10944 flags = btrfs_extent_flags(leaf, ei);
10946 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
10947 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
10950 if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
10951 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
10952 level = found_key.offset;
10954 struct btrfs_tree_block_info *binfo;
10956 binfo = (struct btrfs_tree_block_info *)(ei + 1);
10957 iref = (struct btrfs_extent_inline_ref *)(binfo + 1);
10958 level = btrfs_tree_block_level(leaf, binfo);
10962 * For a root extent, it must be of the following type and the
10963 * first (and only one) iref in the item.
10965 type = btrfs_extent_inline_ref_type(leaf, iref);
10966 if (type != BTRFS_TREE_BLOCK_REF_KEY)
10969 root_id = btrfs_extent_inline_ref_offset(leaf, iref);
10970 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
10972 rii = malloc(sizeof(struct root_item_info));
10977 rii->cache_extent.start = root_id;
10978 rii->cache_extent.size = 1;
10979 rii->level = (u8)-1;
10980 entry = &rii->cache_extent;
10981 ret = insert_cache_extent(roots_info_cache, entry);
10984 rii = container_of(entry, struct root_item_info,
10988 ASSERT(rii->cache_extent.start == root_id);
10989 ASSERT(rii->cache_extent.size == 1);
10991 if (level > rii->level || rii->level == (u8)-1) {
10992 rii->level = level;
10993 rii->bytenr = found_key.objectid;
10994 rii->gen = btrfs_extent_generation(leaf, ei);
10995 rii->node_count = 1;
10996 } else if (level == rii->level) {
11004 btrfs_free_path(path);
11009 static int maybe_repair_root_item(struct btrfs_fs_info *info,
11010 struct btrfs_path *path,
11011 const struct btrfs_key *root_key,
11012 const int read_only_mode)
11014 const u64 root_id = root_key->objectid;
11015 struct cache_extent *entry;
11016 struct root_item_info *rii;
11017 struct btrfs_root_item ri;
11018 unsigned long offset;
11020 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
11023 "Error: could not find extent items for root %llu\n",
11024 root_key->objectid);
11028 rii = container_of(entry, struct root_item_info, cache_extent);
11029 ASSERT(rii->cache_extent.start == root_id);
11030 ASSERT(rii->cache_extent.size == 1);
11032 if (rii->node_count != 1) {
11034 "Error: could not find btree root extent for root %llu\n",
11039 offset = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
11040 read_extent_buffer(path->nodes[0], &ri, offset, sizeof(ri));
11042 if (btrfs_root_bytenr(&ri) != rii->bytenr ||
11043 btrfs_root_level(&ri) != rii->level ||
11044 btrfs_root_generation(&ri) != rii->gen) {
11047 * If we're in repair mode but our caller told us to not update
11048 * the root item, i.e. just check if it needs to be updated, don't
11049 * print this message, since the caller will call us again shortly
11050 * for the same root item without read only mode (the caller will
11051 * open a transaction first).
11053 if (!(read_only_mode && repair))
11055 "%sroot item for root %llu,"
11056 " current bytenr %llu, current gen %llu, current level %u,"
11057 " new bytenr %llu, new gen %llu, new level %u\n",
11058 (read_only_mode ? "" : "fixing "),
11060 btrfs_root_bytenr(&ri), btrfs_root_generation(&ri),
11061 btrfs_root_level(&ri),
11062 rii->bytenr, rii->gen, rii->level);
11064 if (btrfs_root_generation(&ri) > rii->gen) {
11066 "root %llu has a root item with a more recent gen (%llu) compared to the found root node (%llu)\n",
11067 root_id, btrfs_root_generation(&ri), rii->gen);
11071 if (!read_only_mode) {
11072 btrfs_set_root_bytenr(&ri, rii->bytenr);
11073 btrfs_set_root_level(&ri, rii->level);
11074 btrfs_set_root_generation(&ri, rii->gen);
11075 write_extent_buffer(path->nodes[0], &ri,
11076 offset, sizeof(ri));
11086 * A regression introduced in the 3.17 kernel (more specifically in 3.17-rc2),
11087 * caused read-only snapshots to be corrupted if they were created at a moment
11088 * when the source subvolume/snapshot had orphan items. The issue was that the
11089 * on-disk root items became incorrect, referring to the pre orphan cleanup root
11090 * node instead of the post orphan cleanup root node.
11091 * So this function, and its callees, just detects and fixes those cases. Even
11092 * though the regression was for read-only snapshots, this function applies to
11093 * any snapshot/subvolume root.
11094 * This must be run before any other repair code - not doing it so, makes other
11095 * repair code delete or modify backrefs in the extent tree for example, which
11096 * will result in an inconsistent fs after repairing the root items.
11098 static int repair_root_items(struct btrfs_fs_info *info)
11100 struct btrfs_path *path = NULL;
11101 struct btrfs_key key;
11102 struct extent_buffer *leaf;
11103 struct btrfs_trans_handle *trans = NULL;
11106 int need_trans = 0;
11108 ret = build_roots_info_cache(info);
11112 path = btrfs_alloc_path();
11118 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
11119 key.type = BTRFS_ROOT_ITEM_KEY;
11124 * Avoid opening and committing transactions if a leaf doesn't have
11125 * any root items that need to be fixed, so that we avoid rotating
11126 * backup roots unnecessarily.
11129 trans = btrfs_start_transaction(info->tree_root, 1);
11130 if (IS_ERR(trans)) {
11131 ret = PTR_ERR(trans);
11136 ret = btrfs_search_slot(trans, info->tree_root, &key, path,
11140 leaf = path->nodes[0];
11143 struct btrfs_key found_key;
11145 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
11146 int no_more_keys = find_next_key(path, &key);
11148 btrfs_release_path(path);
11150 ret = btrfs_commit_transaction(trans,
11162 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
11164 if (found_key.type != BTRFS_ROOT_ITEM_KEY)
11166 if (found_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
11169 ret = maybe_repair_root_item(info, path, &found_key,
11174 if (!trans && repair) {
11177 btrfs_release_path(path);
11187 free_roots_info_cache();
11188 btrfs_free_path(path);
11190 btrfs_commit_transaction(trans, info->tree_root);
11197 const char * const cmd_check_usage[] = {
11198 "btrfs check [options] <device>",
11199 "Check structural integrity of a filesystem (unmounted).",
11200 "Check structural integrity of an unmounted filesystem. Verify internal",
11201 "trees' consistency and item connectivity. In the repair mode try to",
11202 "fix the problems found. ",
11203 "WARNING: the repair mode is considered dangerous",
11205 "-s|--super <superblock> use this superblock copy",
11206 "-b|--backup use the first valid backup root copy",
11207 "--repair try to repair the filesystem",
11208 "--readonly run in read-only mode (default)",
11209 "--init-csum-tree create a new CRC tree",
11210 "--init-extent-tree create a new extent tree",
11211 "--mode <MODE> select mode, allows to make some memory/IO",
11212 " trade-offs, where MODE is one of:",
11213 " original - read inodes and extents to memory (requires",
11214 " more memory, does less IO)",
11215 " lowmem - try to use less memory but read blocks again",
11217 "--check-data-csum verify checksums of data blocks",
11218 "-Q|--qgroup-report print a report on qgroup consistency",
11219 "-E|--subvol-extents <subvolid>",
11220 " print subvolume extents and sharing state",
11221 "-r|--tree-root <bytenr> use the given bytenr for the tree root",
11222 "--chunk-root <bytenr> use the given bytenr for the chunk tree root",
11223 "-p|--progress indicate progress",
11227 int cmd_check(int argc, char **argv)
11229 struct cache_tree root_cache;
11230 struct btrfs_root *root;
11231 struct btrfs_fs_info *info;
11234 u64 tree_root_bytenr = 0;
11235 u64 chunk_root_bytenr = 0;
11236 char uuidbuf[BTRFS_UUID_UNPARSED_SIZE];
11239 int init_csum_tree = 0;
11241 int qgroup_report = 0;
11242 int qgroups_repaired = 0;
11243 unsigned ctree_flags = OPEN_CTREE_EXCLUSIVE;
11247 enum { GETOPT_VAL_REPAIR = 257, GETOPT_VAL_INIT_CSUM,
11248 GETOPT_VAL_INIT_EXTENT, GETOPT_VAL_CHECK_CSUM,
11249 GETOPT_VAL_READONLY, GETOPT_VAL_CHUNK_TREE,
11251 static const struct option long_options[] = {
11252 { "super", required_argument, NULL, 's' },
11253 { "repair", no_argument, NULL, GETOPT_VAL_REPAIR },
11254 { "readonly", no_argument, NULL, GETOPT_VAL_READONLY },
11255 { "init-csum-tree", no_argument, NULL,
11256 GETOPT_VAL_INIT_CSUM },
11257 { "init-extent-tree", no_argument, NULL,
11258 GETOPT_VAL_INIT_EXTENT },
11259 { "check-data-csum", no_argument, NULL,
11260 GETOPT_VAL_CHECK_CSUM },
11261 { "backup", no_argument, NULL, 'b' },
11262 { "subvol-extents", required_argument, NULL, 'E' },
11263 { "qgroup-report", no_argument, NULL, 'Q' },
11264 { "tree-root", required_argument, NULL, 'r' },
11265 { "chunk-root", required_argument, NULL,
11266 GETOPT_VAL_CHUNK_TREE },
11267 { "progress", no_argument, NULL, 'p' },
11268 { "mode", required_argument, NULL,
11270 { NULL, 0, NULL, 0}
11273 c = getopt_long(argc, argv, "as:br:p", long_options, NULL);
11277 case 'a': /* ignored */ break;
11279 ctree_flags |= OPEN_CTREE_BACKUP_ROOT;
11282 num = arg_strtou64(optarg);
11283 if (num >= BTRFS_SUPER_MIRROR_MAX) {
11285 "ERROR: super mirror should be less than: %d\n",
11286 BTRFS_SUPER_MIRROR_MAX);
11289 bytenr = btrfs_sb_offset(((int)num));
11290 printf("using SB copy %llu, bytenr %llu\n", num,
11291 (unsigned long long)bytenr);
11297 subvolid = arg_strtou64(optarg);
11300 tree_root_bytenr = arg_strtou64(optarg);
11302 case GETOPT_VAL_CHUNK_TREE:
11303 chunk_root_bytenr = arg_strtou64(optarg);
11306 ctx.progress_enabled = true;
11310 usage(cmd_check_usage);
11311 case GETOPT_VAL_REPAIR:
11312 printf("enabling repair mode\n");
11314 ctree_flags |= OPEN_CTREE_WRITES;
11316 case GETOPT_VAL_READONLY:
11319 case GETOPT_VAL_INIT_CSUM:
11320 printf("Creating a new CRC tree\n");
11321 init_csum_tree = 1;
11323 ctree_flags |= OPEN_CTREE_WRITES;
11325 case GETOPT_VAL_INIT_EXTENT:
11326 init_extent_tree = 1;
11327 ctree_flags |= (OPEN_CTREE_WRITES |
11328 OPEN_CTREE_NO_BLOCK_GROUPS);
11331 case GETOPT_VAL_CHECK_CSUM:
11332 check_data_csum = 1;
11334 case GETOPT_VAL_MODE:
11335 check_mode = parse_check_mode(optarg);
11336 if (check_mode == CHECK_MODE_UNKNOWN) {
11337 error("unknown mode: %s", optarg);
11344 if (check_argc_exact(argc - optind, 1))
11345 usage(cmd_check_usage);
11347 if (ctx.progress_enabled) {
11348 ctx.tp = TASK_NOTHING;
11349 ctx.info = task_init(print_status_check, print_status_return, &ctx);
11352 /* This check is the only reason for --readonly to exist */
11353 if (readonly && repair) {
11354 fprintf(stderr, "Repair options are not compatible with --readonly\n");
11359 * Not supported yet
11361 if (repair && check_mode == CHECK_MODE_LOWMEM) {
11362 error("Low memory mode doesn't support repair yet");
11367 cache_tree_init(&root_cache);
11369 if((ret = check_mounted(argv[optind])) < 0) {
11370 fprintf(stderr, "Could not check mount status: %s\n", strerror(-ret));
11373 fprintf(stderr, "%s is currently mounted. Aborting.\n", argv[optind]);
11378 /* only allow partial opening under repair mode */
11380 ctree_flags |= OPEN_CTREE_PARTIAL;
11382 info = open_ctree_fs_info(argv[optind], bytenr, tree_root_bytenr,
11383 chunk_root_bytenr, ctree_flags);
11385 fprintf(stderr, "Couldn't open file system\n");
11390 global_info = info;
11391 root = info->fs_root;
11394 * repair mode will force us to commit transaction which
11395 * will make us fail to load log tree when mounting.
11397 if (repair && btrfs_super_log_root(info->super_copy)) {
11398 ret = ask_user("repair mode will force to clear out log tree, Are you sure?");
11403 ret = zero_log_tree(root);
11405 fprintf(stderr, "fail to zero log tree\n");
11410 uuid_unparse(info->super_copy->fsid, uuidbuf);
11411 if (qgroup_report) {
11412 printf("Print quota groups for %s\nUUID: %s\n", argv[optind],
11414 ret = qgroup_verify_all(info);
11420 printf("Print extent state for subvolume %llu on %s\nUUID: %s\n",
11421 subvolid, argv[optind], uuidbuf);
11422 ret = print_extent_state(info, subvolid);
11425 printf("Checking filesystem on %s\nUUID: %s\n", argv[optind], uuidbuf);
11427 if (!extent_buffer_uptodate(info->tree_root->node) ||
11428 !extent_buffer_uptodate(info->dev_root->node) ||
11429 !extent_buffer_uptodate(info->chunk_root->node)) {
11430 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
11435 if (init_extent_tree || init_csum_tree) {
11436 struct btrfs_trans_handle *trans;
11438 trans = btrfs_start_transaction(info->extent_root, 0);
11439 if (IS_ERR(trans)) {
11440 fprintf(stderr, "Error starting transaction\n");
11441 ret = PTR_ERR(trans);
11445 if (init_extent_tree) {
11446 printf("Creating a new extent tree\n");
11447 ret = reinit_extent_tree(trans, info);
11452 if (init_csum_tree) {
11453 fprintf(stderr, "Reinit crc root\n");
11454 ret = btrfs_fsck_reinit_root(trans, info->csum_root, 0);
11456 fprintf(stderr, "crc root initialization failed\n");
11461 ret = fill_csum_tree(trans, info->csum_root,
11464 fprintf(stderr, "crc refilling failed\n");
11469 * Ok now we commit and run the normal fsck, which will add
11470 * extent entries for all of the items it finds.
11472 ret = btrfs_commit_transaction(trans, info->extent_root);
11476 if (!extent_buffer_uptodate(info->extent_root->node)) {
11477 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
11481 if (!extent_buffer_uptodate(info->csum_root->node)) {
11482 fprintf(stderr, "Checksum root corrupted, rerun with --init-csum-tree option\n");
11487 if (!ctx.progress_enabled)
11488 fprintf(stderr, "checking extents\n");
11489 if (check_mode == CHECK_MODE_LOWMEM)
11490 ret = check_chunks_and_extents_v2(root);
11492 ret = check_chunks_and_extents(root);
11494 fprintf(stderr, "Errors found in extent allocation tree or chunk allocation\n");
11496 ret = repair_root_items(info);
11500 fprintf(stderr, "Fixed %d roots.\n", ret);
11502 } else if (ret > 0) {
11504 "Found %d roots with an outdated root item.\n",
11507 "Please run a filesystem check with the option --repair to fix them.\n");
11512 if (!ctx.progress_enabled) {
11513 if (btrfs_fs_compat_ro(info, BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE))
11514 fprintf(stderr, "checking free space tree\n");
11516 fprintf(stderr, "checking free space cache\n");
11518 ret = check_space_cache(root);
11523 * We used to have to have these hole extents in between our real
11524 * extents so if we don't have this flag set we need to make sure there
11525 * are no gaps in the file extents for inodes, otherwise we can just
11526 * ignore it when this happens.
11528 no_holes = btrfs_fs_incompat(root->fs_info,
11529 BTRFS_FEATURE_INCOMPAT_NO_HOLES);
11530 if (!ctx.progress_enabled)
11531 fprintf(stderr, "checking fs roots\n");
11532 ret = check_fs_roots(root, &root_cache);
11536 fprintf(stderr, "checking csums\n");
11537 ret = check_csums(root);
11541 fprintf(stderr, "checking root refs\n");
11542 ret = check_root_refs(root, &root_cache);
11546 while (repair && !list_empty(&root->fs_info->recow_ebs)) {
11547 struct extent_buffer *eb;
11549 eb = list_first_entry(&root->fs_info->recow_ebs,
11550 struct extent_buffer, recow);
11551 list_del_init(&eb->recow);
11552 ret = recow_extent_buffer(root, eb);
11557 while (!list_empty(&delete_items)) {
11558 struct bad_item *bad;
11560 bad = list_first_entry(&delete_items, struct bad_item, list);
11561 list_del_init(&bad->list);
11563 ret = delete_bad_item(root, bad);
11567 if (info->quota_enabled) {
11569 fprintf(stderr, "checking quota groups\n");
11570 err = qgroup_verify_all(info);
11574 err = repair_qgroups(info, &qgroups_repaired);
11579 if (!list_empty(&root->fs_info->recow_ebs)) {
11580 fprintf(stderr, "Transid errors in file system\n");
11584 /* Don't override original ret */
11585 if (!ret && qgroups_repaired)
11586 ret = qgroups_repaired;
11588 if (found_old_backref) { /*
11589 * there was a disk format change when mixed
11590 * backref was in testing tree. The old format
11591 * existed about one week.
11593 printf("\n * Found old mixed backref format. "
11594 "The old format is not supported! *"
11595 "\n * Please mount the FS in readonly mode, "
11596 "backup data and re-format the FS. *\n\n");
11599 printf("found %llu bytes used err is %d\n",
11600 (unsigned long long)bytes_used, ret);
11601 printf("total csum bytes: %llu\n",(unsigned long long)total_csum_bytes);
11602 printf("total tree bytes: %llu\n",
11603 (unsigned long long)total_btree_bytes);
11604 printf("total fs tree bytes: %llu\n",
11605 (unsigned long long)total_fs_tree_bytes);
11606 printf("total extent tree bytes: %llu\n",
11607 (unsigned long long)total_extent_tree_bytes);
11608 printf("btree space waste bytes: %llu\n",
11609 (unsigned long long)btree_space_waste);
11610 printf("file data blocks allocated: %llu\n referenced %llu\n",
11611 (unsigned long long)data_bytes_allocated,
11612 (unsigned long long)data_bytes_referenced);
11614 free_qgroup_counts();
11615 free_root_recs_tree(&root_cache);
11619 if (ctx.progress_enabled)
11620 task_deinit(ctx.info);