2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
23 #include <sys/types.h>
27 #include <uuid/uuid.h>
32 #include "print-tree.h"
33 #include "transaction.h"
36 #include "free-space-cache.h"
38 #include "qgroup-verify.h"
39 #include "rbtree-utils.h"
43 static u64 bytes_used = 0;
44 static u64 total_csum_bytes = 0;
45 static u64 total_btree_bytes = 0;
46 static u64 total_fs_tree_bytes = 0;
47 static u64 total_extent_tree_bytes = 0;
48 static u64 btree_space_waste = 0;
49 static u64 data_bytes_allocated = 0;
50 static u64 data_bytes_referenced = 0;
51 static int found_old_backref = 0;
52 static LIST_HEAD(duplicate_extents);
53 static LIST_HEAD(delete_items);
54 static int repair = 0;
55 static int no_holes = 0;
56 static int init_extent_tree = 0;
57 static int check_data_csum = 0;
59 struct extent_backref {
60 struct list_head list;
61 unsigned int is_data:1;
62 unsigned int found_extent_tree:1;
63 unsigned int full_backref:1;
64 unsigned int found_ref:1;
65 unsigned int broken:1;
69 struct extent_backref node;
84 * Much like data_backref, but with the undetermined members removed
85 * and changed to use a list_head.
86 * During extent scan, it is stored in root->orphan_data_extent.
87 * During fs tree scan, it is then moved to inode_rec->orphan_data_extents.
89 struct orphan_data_extent {
90 struct list_head list;
99 struct extent_backref node;
106 struct extent_record {
107 struct list_head backrefs;
108 struct list_head dups;
109 struct list_head list;
110 struct cache_extent cache;
111 struct btrfs_disk_key parent_key;
116 u64 extent_item_refs;
118 u64 parent_generation;
122 int flag_block_full_backref;
123 unsigned int found_rec:1;
124 unsigned int content_checked:1;
125 unsigned int owner_ref_checked:1;
126 unsigned int is_root:1;
127 unsigned int metadata:1;
128 unsigned int bad_full_backref:1;
129 unsigned int crossing_stripes:1;
132 struct inode_backref {
133 struct list_head list;
134 unsigned int found_dir_item:1;
135 unsigned int found_dir_index:1;
136 unsigned int found_inode_ref:1;
137 unsigned int filetype:8;
139 unsigned int ref_type;
146 struct root_item_record {
147 struct list_head list;
154 struct btrfs_key drop_key;
157 #define REF_ERR_NO_DIR_ITEM (1 << 0)
158 #define REF_ERR_NO_DIR_INDEX (1 << 1)
159 #define REF_ERR_NO_INODE_REF (1 << 2)
160 #define REF_ERR_DUP_DIR_ITEM (1 << 3)
161 #define REF_ERR_DUP_DIR_INDEX (1 << 4)
162 #define REF_ERR_DUP_INODE_REF (1 << 5)
163 #define REF_ERR_INDEX_UNMATCH (1 << 6)
164 #define REF_ERR_FILETYPE_UNMATCH (1 << 7)
165 #define REF_ERR_NAME_TOO_LONG (1 << 8) // 100
166 #define REF_ERR_NO_ROOT_REF (1 << 9)
167 #define REF_ERR_NO_ROOT_BACKREF (1 << 10)
168 #define REF_ERR_DUP_ROOT_REF (1 << 11)
169 #define REF_ERR_DUP_ROOT_BACKREF (1 << 12)
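/*
 * These flags are OR'ed into inode_backref->errors while the backrefs are
 * collected (see add_inode_backref() below) and decoded into text by
 * print_ref_error().  A small illustration, using an example combination:
 *
 *	backref->errors |= REF_ERR_NO_DIR_ITEM | REF_ERR_NO_INODE_REF;
 *	print_ref_error(backref->errors);   // ", no dir item, no inode ref"
 *
 * The "// 100" note above is the hex value of REF_ERR_NAME_TOO_LONG (1 << 8).
 */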
171 struct file_extent_hole {
177 /* Compatibility helper to allow reuse of the old code */
178 static u64 first_extent_gap(struct rb_root *holes)
180 struct file_extent_hole *hole;
182 if (RB_EMPTY_ROOT(holes))
185 hole = rb_entry(rb_first(holes), struct file_extent_hole, node);
189 int compare_hole(struct rb_node *node1, struct rb_node *node2)
191 struct file_extent_hole *hole1;
192 struct file_extent_hole *hole2;
194 hole1 = rb_entry(node1, struct file_extent_hole, node);
195 hole2 = rb_entry(node2, struct file_extent_hole, node);
197 if (hole1->start > hole2->start)
199 if (hole1->start < hole2->start)
201 /* Now hole1->start == hole2->start */
202 if (hole1->len >= hole2->len)
204 * Hole 1 will be the merge center
205 * The same hole will be merged into it later
208 /* Hole 2 will be the merge center */
213 * Add a hole to the record
215 * This will merge holes for copy_file_extent_holes(),
216 * which ensures there won't be contiguous holes left.
218 static int add_file_extent_hole(struct rb_root *holes,
221 struct file_extent_hole *hole;
222 struct file_extent_hole *prev = NULL;
223 struct file_extent_hole *next = NULL;
225 hole = malloc(sizeof(*hole));
230 /* Since compare will not return 0, no -EEXIST will happen */
231 rb_insert(holes, &hole->node, compare_hole);
233 /* simple merge with previous hole */
234 if (rb_prev(&hole->node))
235 prev = rb_entry(rb_prev(&hole->node), struct file_extent_hole,
237 if (prev && prev->start + prev->len >= hole->start) {
238 hole->len = hole->start + hole->len - prev->start;
239 hole->start = prev->start;
240 rb_erase(&prev->node, holes);
245 /* iteratively merge with the following holes */
247 if (!rb_next(&hole->node))
249 next = rb_entry(rb_next(&hole->node), struct file_extent_hole,
251 if (hole->start + hole->len >= next->start) {
252 if (hole->start + hole->len <= next->start + next->len)
253 hole->len = next->start + next->len -
255 rb_erase(&next->node, holes);
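/*
 * A small illustration of the merge behaviour described above, assuming an
 * initially empty tree (the offsets and lengths are example values only):
 *
 *	struct rb_root holes = RB_ROOT;
 *
 *	add_file_extent_hole(&holes, 0, 4096);
 *	add_file_extent_hole(&holes, 8192, 4096);
 *	add_file_extent_hole(&holes, 4096, 4096);
 *
 * The third call touches both existing holes, so everything collapses into a
 * single hole [0, 12288) and first_extent_gap(&holes) returns 0.
 */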
264 static int compare_hole_range(struct rb_node *node, void *data)
266 struct file_extent_hole *hole;
269 hole = (struct file_extent_hole *)data;
272 hole = rb_entry(node, struct file_extent_hole, node);
273 if (start < hole->start)
275 if (start >= hole->start && start < hole->start + hole->len)
281 * Delete a hole in the record
283 * This will do the hole split and is much more strict than add.
285 static int del_file_extent_hole(struct rb_root *holes,
288 struct file_extent_hole *hole;
289 struct file_extent_hole tmp;
294 struct rb_node *node;
301 node = rb_search(holes, &tmp, compare_hole_range, NULL);
304 hole = rb_entry(node, struct file_extent_hole, node);
305 if (start + len > hole->start + hole->len)
309 * Now there will be no overlap; delete the hole and re-add the
310 * split(s) if they exist.
312 if (start > hole->start) {
313 prev_start = hole->start;
314 prev_len = start - hole->start;
317 if (hole->start + hole->len > start + len) {
318 next_start = start + len;
319 next_len = hole->start + hole->len - start - len;
322 rb_erase(node, holes);
325 ret = add_file_extent_hole(holes, prev_start, prev_len);
330 ret = add_file_extent_hole(holes, next_start, next_len);
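/*
 * Continuing the sketch above (and assuming the holes/start/len argument
 * order used throughout this file): deleting the middle of the merged hole
 * splits it back into two holes,
 *
 *	del_file_extent_hole(&holes, 4096, 4096);
 *
 * leaving [0, 4096) and [8192, 12288) in the tree.
 */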
337 static int copy_file_extent_holes(struct rb_root *dst,
340 struct file_extent_hole *hole;
341 struct rb_node *node;
344 node = rb_first(src);
346 hole = rb_entry(node, struct file_extent_hole, node);
347 ret = add_file_extent_hole(dst, hole->start, hole->len);
350 node = rb_next(node);
355 static void free_file_extent_holes(struct rb_root *holes)
357 struct rb_node *node;
358 struct file_extent_hole *hole;
360 node = rb_first(holes);
362 hole = rb_entry(node, struct file_extent_hole, node);
363 rb_erase(node, holes);
365 node = rb_first(holes);
369 struct inode_record {
370 struct list_head backrefs;
371 unsigned int checked:1;
372 unsigned int merging:1;
373 unsigned int found_inode_item:1;
374 unsigned int found_dir_item:1;
375 unsigned int found_file_extent:1;
376 unsigned int found_csum_item:1;
377 unsigned int some_csum_missing:1;
378 unsigned int nodatasum:1;
391 struct rb_root holes;
392 struct list_head orphan_extents;
397 #define I_ERR_NO_INODE_ITEM (1 << 0)
398 #define I_ERR_NO_ORPHAN_ITEM (1 << 1)
399 #define I_ERR_DUP_INODE_ITEM (1 << 2)
400 #define I_ERR_DUP_DIR_INDEX (1 << 3)
401 #define I_ERR_ODD_DIR_ITEM (1 << 4)
402 #define I_ERR_ODD_FILE_EXTENT (1 << 5)
403 #define I_ERR_BAD_FILE_EXTENT (1 << 6)
404 #define I_ERR_FILE_EXTENT_OVERLAP (1 << 7)
405 #define I_ERR_FILE_EXTENT_DISCOUNT (1 << 8) // 100
406 #define I_ERR_DIR_ISIZE_WRONG (1 << 9)
407 #define I_ERR_FILE_NBYTES_WRONG (1 << 10) // 400
408 #define I_ERR_ODD_CSUM_ITEM (1 << 11)
409 #define I_ERR_SOME_CSUM_MISSING (1 << 12)
410 #define I_ERR_LINK_COUNT_WRONG (1 << 13)
411 #define I_ERR_FILE_EXTENT_ORPHAN (1 << 14)
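/*
 * These flags accumulate in inode_record->errors and are printed both as a
 * raw hex mask and as text by print_inode_error().  A hypothetical fsck line
 * such as
 *
 *	root 5 inode 257 errors 400, nbytes wrong
 *
 * decodes as 0x400 == (1 << 10) == I_ERR_FILE_NBYTES_WRONG, matching the
 * "// 400" note above.
 */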
413 struct root_backref {
414 struct list_head list;
415 unsigned int found_dir_item:1;
416 unsigned int found_dir_index:1;
417 unsigned int found_back_ref:1;
418 unsigned int found_forward_ref:1;
419 unsigned int reachable:1;
429 struct list_head backrefs;
430 struct cache_extent cache;
431 unsigned int found_root_item:1;
437 struct cache_extent cache;
442 struct cache_extent cache;
443 struct cache_tree root_cache;
444 struct cache_tree inode_cache;
445 struct inode_record *current;
454 struct walk_control {
455 struct cache_tree shared;
456 struct shared_node *nodes[BTRFS_MAX_LEVEL];
462 struct btrfs_key key;
464 struct list_head list;
467 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info);
469 static void record_root_in_trans(struct btrfs_trans_handle *trans,
470 struct btrfs_root *root)
472 if (root->last_trans != trans->transid) {
473 root->track_dirty = 1;
474 root->last_trans = trans->transid;
475 root->commit_root = root->node;
476 extent_buffer_get(root->node);
480 static u8 imode_to_type(u32 imode)
483 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
484 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
485 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
486 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
487 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
488 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
489 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
490 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
493 return btrfs_type_by_mode[(imode & S_IFMT) >> S_SHIFT];
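/*
 * For example, imode_to_type(S_IFDIR | 0755) evaluates to BTRFS_FT_DIR and
 * imode_to_type(S_IFREG | 0644) to BTRFS_FT_REG_FILE; the result is compared
 * against the filetype stored in directory entries (see
 * maybe_free_inode_rec() below).
 */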
497 static int device_record_compare(struct rb_node *node1, struct rb_node *node2)
499 struct device_record *rec1;
500 struct device_record *rec2;
502 rec1 = rb_entry(node1, struct device_record, node);
503 rec2 = rb_entry(node2, struct device_record, node);
504 if (rec1->devid > rec2->devid)
506 else if (rec1->devid < rec2->devid)
512 static struct inode_record *clone_inode_rec(struct inode_record *orig_rec)
514 struct inode_record *rec;
515 struct inode_backref *backref;
516 struct inode_backref *orig;
517 struct orphan_data_extent *src_orphan;
518 struct orphan_data_extent *dst_orphan;
522 rec = malloc(sizeof(*rec));
523 memcpy(rec, orig_rec, sizeof(*rec));
525 INIT_LIST_HEAD(&rec->backrefs);
526 INIT_LIST_HEAD(&rec->orphan_extents);
527 rec->holes = RB_ROOT;
529 list_for_each_entry(orig, &orig_rec->backrefs, list) {
530 size = sizeof(*orig) + orig->namelen + 1;
531 backref = malloc(size);
532 memcpy(backref, orig, size);
533 list_add_tail(&backref->list, &rec->backrefs);
535 list_for_each_entry(src_orphan, &orig_rec->orphan_extents, list) {
536 dst_orphan = malloc(sizeof(*dst_orphan));
537 /* TODO: Fix all the HELL of uncaught -ENOMEM cases */
539 memcpy(dst_orphan, src_orphan, sizeof(*src_orphan));
540 list_add_tail(&dst_orphan->list, &rec->orphan_extents);
542 ret = copy_file_extent_holes(&rec->holes, &orig_rec->holes);
548 static void print_orphan_data_extents(struct list_head *orphan_extents,
551 struct orphan_data_extent *orphan;
553 if (list_empty(orphan_extents))
555 printf("The following data extent is lost in tree %llu:\n",
557 list_for_each_entry(orphan, orphan_extents, list) {
558 printf("\tinode: %llu, offset:%llu, disk_bytenr: %llu, disk_len: %llu\n",
559 orphan->objectid, orphan->offset, orphan->disk_bytenr,
564 static void print_inode_error(struct btrfs_root *root, struct inode_record *rec)
566 u64 root_objectid = root->root_key.objectid;
567 int errors = rec->errors;
571 /* For reloc root errors, print the corresponding fs root objectid */
572 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
573 root_objectid = root->root_key.offset;
574 fprintf(stderr, "reloc");
576 fprintf(stderr, "root %llu inode %llu errors %x",
577 (unsigned long long) root_objectid,
578 (unsigned long long) rec->ino, rec->errors);
580 if (errors & I_ERR_NO_INODE_ITEM)
581 fprintf(stderr, ", no inode item");
582 if (errors & I_ERR_NO_ORPHAN_ITEM)
583 fprintf(stderr, ", no orphan item");
584 if (errors & I_ERR_DUP_INODE_ITEM)
585 fprintf(stderr, ", dup inode item");
586 if (errors & I_ERR_DUP_DIR_INDEX)
587 fprintf(stderr, ", dup dir index");
588 if (errors & I_ERR_ODD_DIR_ITEM)
589 fprintf(stderr, ", odd dir item");
590 if (errors & I_ERR_ODD_FILE_EXTENT)
591 fprintf(stderr, ", odd file extent");
592 if (errors & I_ERR_BAD_FILE_EXTENT)
593 fprintf(stderr, ", bad file extent");
594 if (errors & I_ERR_FILE_EXTENT_OVERLAP)
595 fprintf(stderr, ", file extent overlap");
596 if (errors & I_ERR_FILE_EXTENT_DISCOUNT)
597 fprintf(stderr, ", file extent discount");
598 if (errors & I_ERR_DIR_ISIZE_WRONG)
599 fprintf(stderr, ", dir isize wrong");
600 if (errors & I_ERR_FILE_NBYTES_WRONG)
601 fprintf(stderr, ", nbytes wrong");
602 if (errors & I_ERR_ODD_CSUM_ITEM)
603 fprintf(stderr, ", odd csum item");
604 if (errors & I_ERR_SOME_CSUM_MISSING)
605 fprintf(stderr, ", some csum missing");
606 if (errors & I_ERR_LINK_COUNT_WRONG)
607 fprintf(stderr, ", link count wrong");
608 if (errors & I_ERR_FILE_EXTENT_ORPHAN)
609 fprintf(stderr, ", orphan file extent");
610 fprintf(stderr, "\n");
611 /* Print the orphan extents if needed */
612 if (errors & I_ERR_FILE_EXTENT_ORPHAN)
613 print_orphan_data_extents(&rec->orphan_extents, root->objectid);
615 /* Print the holes if needed */
616 if (errors & I_ERR_FILE_EXTENT_DISCOUNT) {
617 struct file_extent_hole *hole;
618 struct rb_node *node;
621 node = rb_first(&rec->holes);
622 fprintf(stderr, "Found file extent holes:\n");
625 hole = rb_entry(node, struct file_extent_hole, node);
626 fprintf(stderr, "\tstart: %llu, len: %llu\n",
627 hole->start, hole->len);
628 node = rb_next(node);
631 fprintf(stderr, "\tstart: 0, len: %llu\n",
632 round_up(rec->isize, root->sectorsize));
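/*
 * When I_ERR_FILE_EXTENT_DISCOUNT is set, the holes recorded in rec->holes
 * are listed one per line, so a hypothetical report could look like:
 *
 *	Found file extent holes:
 *		start: 4096, len: 4096
 *
 * If no individual hole was recorded, the fallback above reports the whole
 * file, from offset 0 up to the sector-aligned isize, as a single hole.
 */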
636 static void print_ref_error(int errors)
638 if (errors & REF_ERR_NO_DIR_ITEM)
639 fprintf(stderr, ", no dir item");
640 if (errors & REF_ERR_NO_DIR_INDEX)
641 fprintf(stderr, ", no dir index");
642 if (errors & REF_ERR_NO_INODE_REF)
643 fprintf(stderr, ", no inode ref");
644 if (errors & REF_ERR_DUP_DIR_ITEM)
645 fprintf(stderr, ", dup dir item");
646 if (errors & REF_ERR_DUP_DIR_INDEX)
647 fprintf(stderr, ", dup dir index");
648 if (errors & REF_ERR_DUP_INODE_REF)
649 fprintf(stderr, ", dup inode ref");
650 if (errors & REF_ERR_INDEX_UNMATCH)
651 fprintf(stderr, ", index unmatch");
652 if (errors & REF_ERR_FILETYPE_UNMATCH)
653 fprintf(stderr, ", filetype unmatch");
654 if (errors & REF_ERR_NAME_TOO_LONG)
655 fprintf(stderr, ", name too long");
656 if (errors & REF_ERR_NO_ROOT_REF)
657 fprintf(stderr, ", no root ref");
658 if (errors & REF_ERR_NO_ROOT_BACKREF)
659 fprintf(stderr, ", no root backref");
660 if (errors & REF_ERR_DUP_ROOT_REF)
661 fprintf(stderr, ", dup root ref");
662 if (errors & REF_ERR_DUP_ROOT_BACKREF)
663 fprintf(stderr, ", dup root backref");
664 fprintf(stderr, "\n");
667 static struct inode_record *get_inode_rec(struct cache_tree *inode_cache,
670 struct ptr_node *node;
671 struct cache_extent *cache;
672 struct inode_record *rec = NULL;
675 cache = lookup_cache_extent(inode_cache, ino, 1);
677 node = container_of(cache, struct ptr_node, cache);
679 if (mod && rec->refs > 1) {
680 node->data = clone_inode_rec(rec);
685 rec = calloc(1, sizeof(*rec));
687 rec->extent_start = (u64)-1;
689 INIT_LIST_HEAD(&rec->backrefs);
690 INIT_LIST_HEAD(&rec->orphan_extents);
691 rec->holes = RB_ROOT;
693 node = malloc(sizeof(*node));
694 node->cache.start = ino;
695 node->cache.size = 1;
698 if (ino == BTRFS_FREE_INO_OBJECTID)
701 ret = insert_cache_extent(inode_cache, &node->cache);
707 static void free_orphan_data_extents(struct list_head *orphan_extents)
709 struct orphan_data_extent *orphan;
711 while (!list_empty(orphan_extents)) {
712 orphan = list_entry(orphan_extents->next,
713 struct orphan_data_extent, list);
714 list_del(&orphan->list);
719 static void free_inode_rec(struct inode_record *rec)
721 struct inode_backref *backref;
726 while (!list_empty(&rec->backrefs)) {
727 backref = list_entry(rec->backrefs.next,
728 struct inode_backref, list);
729 list_del(&backref->list);
732 free_orphan_data_extents(&rec->orphan_extents);
733 free_file_extent_holes(&rec->holes);
737 static int can_free_inode_rec(struct inode_record *rec)
739 if (!rec->errors && rec->checked && rec->found_inode_item &&
740 rec->nlink == rec->found_link && list_empty(&rec->backrefs))
745 static void maybe_free_inode_rec(struct cache_tree *inode_cache,
746 struct inode_record *rec)
748 struct cache_extent *cache;
749 struct inode_backref *tmp, *backref;
750 struct ptr_node *node;
751 unsigned char filetype;
753 if (!rec->found_inode_item)
756 filetype = imode_to_type(rec->imode);
757 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
758 if (backref->found_dir_item && backref->found_dir_index) {
759 if (backref->filetype != filetype)
760 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
761 if (!backref->errors && backref->found_inode_ref) {
762 list_del(&backref->list);
768 if (!rec->checked || rec->merging)
771 if (S_ISDIR(rec->imode)) {
772 if (rec->found_size != rec->isize)
773 rec->errors |= I_ERR_DIR_ISIZE_WRONG;
774 if (rec->found_file_extent)
775 rec->errors |= I_ERR_ODD_FILE_EXTENT;
776 } else if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
777 if (rec->found_dir_item)
778 rec->errors |= I_ERR_ODD_DIR_ITEM;
779 if (rec->found_size != rec->nbytes)
780 rec->errors |= I_ERR_FILE_NBYTES_WRONG;
781 if (rec->nlink > 0 && !no_holes &&
782 (rec->extent_end < rec->isize ||
783 first_extent_gap(&rec->holes) < rec->isize))
784 rec->errors |= I_ERR_FILE_EXTENT_DISCOUNT;
787 if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
788 if (rec->found_csum_item && rec->nodatasum)
789 rec->errors |= I_ERR_ODD_CSUM_ITEM;
790 if (rec->some_csum_missing && !rec->nodatasum)
791 rec->errors |= I_ERR_SOME_CSUM_MISSING;
794 BUG_ON(rec->refs != 1);
795 if (can_free_inode_rec(rec)) {
796 cache = lookup_cache_extent(inode_cache, rec->ino, 1);
797 node = container_of(cache, struct ptr_node, cache);
798 BUG_ON(node->data != rec);
799 remove_cache_extent(inode_cache, &node->cache);
805 static int check_orphan_item(struct btrfs_root *root, u64 ino)
807 struct btrfs_path path;
808 struct btrfs_key key;
811 key.objectid = BTRFS_ORPHAN_OBJECTID;
812 key.type = BTRFS_ORPHAN_ITEM_KEY;
815 btrfs_init_path(&path);
816 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
817 btrfs_release_path(&path);
823 static int process_inode_item(struct extent_buffer *eb,
824 int slot, struct btrfs_key *key,
825 struct shared_node *active_node)
827 struct inode_record *rec;
828 struct btrfs_inode_item *item;
830 rec = active_node->current;
831 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
832 if (rec->found_inode_item) {
833 rec->errors |= I_ERR_DUP_INODE_ITEM;
836 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
837 rec->nlink = btrfs_inode_nlink(eb, item);
838 rec->isize = btrfs_inode_size(eb, item);
839 rec->nbytes = btrfs_inode_nbytes(eb, item);
840 rec->imode = btrfs_inode_mode(eb, item);
841 if (btrfs_inode_flags(eb, item) & BTRFS_INODE_NODATASUM)
843 rec->found_inode_item = 1;
845 rec->errors |= I_ERR_NO_ORPHAN_ITEM;
846 maybe_free_inode_rec(&active_node->inode_cache, rec);
850 static struct inode_backref *get_inode_backref(struct inode_record *rec,
852 int namelen, u64 dir)
854 struct inode_backref *backref;
856 list_for_each_entry(backref, &rec->backrefs, list) {
857 if (rec->ino == BTRFS_MULTIPLE_OBJECTIDS)
859 if (backref->dir != dir || backref->namelen != namelen)
861 if (memcmp(name, backref->name, namelen))
866 backref = malloc(sizeof(*backref) + namelen + 1);
867 memset(backref, 0, sizeof(*backref));
869 backref->namelen = namelen;
870 memcpy(backref->name, name, namelen);
871 backref->name[namelen] = '\0';
872 list_add_tail(&backref->list, &rec->backrefs);
876 static int add_inode_backref(struct cache_tree *inode_cache,
877 u64 ino, u64 dir, u64 index,
878 const char *name, int namelen,
879 int filetype, int itemtype, int errors)
881 struct inode_record *rec;
882 struct inode_backref *backref;
884 rec = get_inode_rec(inode_cache, ino, 1);
885 backref = get_inode_backref(rec, name, namelen, dir);
887 backref->errors |= errors;
888 if (itemtype == BTRFS_DIR_INDEX_KEY) {
889 if (backref->found_dir_index)
890 backref->errors |= REF_ERR_DUP_DIR_INDEX;
891 if (backref->found_inode_ref && backref->index != index)
892 backref->errors |= REF_ERR_INDEX_UNMATCH;
893 if (backref->found_dir_item && backref->filetype != filetype)
894 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
896 backref->index = index;
897 backref->filetype = filetype;
898 backref->found_dir_index = 1;
899 } else if (itemtype == BTRFS_DIR_ITEM_KEY) {
901 if (backref->found_dir_item)
902 backref->errors |= REF_ERR_DUP_DIR_ITEM;
903 if (backref->found_dir_index && backref->filetype != filetype)
904 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
906 backref->filetype = filetype;
907 backref->found_dir_item = 1;
908 } else if ((itemtype == BTRFS_INODE_REF_KEY) ||
909 (itemtype == BTRFS_INODE_EXTREF_KEY)) {
910 if (backref->found_inode_ref)
911 backref->errors |= REF_ERR_DUP_INODE_REF;
912 if (backref->found_dir_index && backref->index != index)
913 backref->errors |= REF_ERR_INDEX_UNMATCH;
915 backref->index = index;
917 backref->ref_type = itemtype;
918 backref->found_inode_ref = 1;
923 maybe_free_inode_rec(inode_cache, rec);
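/*
 * A sketch of how the cross-checking works (all numbers are example values):
 * processing a DIR_INDEX item for the name "foo" at index 2 in directory 256,
 * pointing at inode 257, ends up as
 *
 *	add_inode_backref(inode_cache, 257, 256, 2, "foo", 3,
 *			  BTRFS_FT_REG_FILE, BTRFS_DIR_INDEX_KEY, 0);
 *
 * If an INODE_REF for the same name later carries a different index,
 * REF_ERR_INDEX_UNMATCH is set on the shared backref.
 */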
927 static int merge_inode_recs(struct inode_record *src, struct inode_record *dst,
928 struct cache_tree *dst_cache)
930 struct inode_backref *backref;
935 list_for_each_entry(backref, &src->backrefs, list) {
936 if (backref->found_dir_index) {
937 add_inode_backref(dst_cache, dst->ino, backref->dir,
938 backref->index, backref->name,
939 backref->namelen, backref->filetype,
940 BTRFS_DIR_INDEX_KEY, backref->errors);
942 if (backref->found_dir_item) {
944 add_inode_backref(dst_cache, dst->ino,
945 backref->dir, 0, backref->name,
946 backref->namelen, backref->filetype,
947 BTRFS_DIR_ITEM_KEY, backref->errors);
949 if (backref->found_inode_ref) {
950 add_inode_backref(dst_cache, dst->ino,
951 backref->dir, backref->index,
952 backref->name, backref->namelen, 0,
953 backref->ref_type, backref->errors);
957 if (src->found_dir_item)
958 dst->found_dir_item = 1;
959 if (src->found_file_extent)
960 dst->found_file_extent = 1;
961 if (src->found_csum_item)
962 dst->found_csum_item = 1;
963 if (src->some_csum_missing)
964 dst->some_csum_missing = 1;
965 if (first_extent_gap(&dst->holes) > first_extent_gap(&src->holes)) {
966 ret = copy_file_extent_holes(&dst->holes, &src->holes);
971 BUG_ON(src->found_link < dir_count);
972 dst->found_link += src->found_link - dir_count;
973 dst->found_size += src->found_size;
974 if (src->extent_start != (u64)-1) {
975 if (dst->extent_start == (u64)-1) {
976 dst->extent_start = src->extent_start;
977 dst->extent_end = src->extent_end;
979 if (dst->extent_end > src->extent_start)
980 dst->errors |= I_ERR_FILE_EXTENT_OVERLAP;
981 else if (dst->extent_end < src->extent_start) {
982 ret = add_file_extent_hole(&dst->holes,
984 src->extent_start - dst->extent_end);
986 if (dst->extent_end < src->extent_end)
987 dst->extent_end = src->extent_end;
991 dst->errors |= src->errors;
992 if (src->found_inode_item) {
993 if (!dst->found_inode_item) {
994 dst->nlink = src->nlink;
995 dst->isize = src->isize;
996 dst->nbytes = src->nbytes;
997 dst->imode = src->imode;
998 dst->nodatasum = src->nodatasum;
999 dst->found_inode_item = 1;
1001 dst->errors |= I_ERR_DUP_INODE_ITEM;
1009 static int splice_shared_node(struct shared_node *src_node,
1010 struct shared_node *dst_node)
1012 struct cache_extent *cache;
1013 struct ptr_node *node, *ins;
1014 struct cache_tree *src, *dst;
1015 struct inode_record *rec, *conflict;
1016 u64 current_ino = 0;
1020 if (--src_node->refs == 0)
1022 if (src_node->current)
1023 current_ino = src_node->current->ino;
1025 src = &src_node->root_cache;
1026 dst = &dst_node->root_cache;
1028 cache = search_cache_extent(src, 0);
1030 node = container_of(cache, struct ptr_node, cache);
1032 cache = next_cache_extent(cache);
1035 remove_cache_extent(src, &node->cache);
1038 ins = malloc(sizeof(*ins));
1039 ins->cache.start = node->cache.start;
1040 ins->cache.size = node->cache.size;
1044 ret = insert_cache_extent(dst, &ins->cache);
1045 if (ret == -EEXIST) {
1046 conflict = get_inode_rec(dst, rec->ino, 1);
1047 merge_inode_recs(rec, conflict, dst);
1049 conflict->checked = 1;
1050 if (dst_node->current == conflict)
1051 dst_node->current = NULL;
1053 maybe_free_inode_rec(dst, conflict);
1054 free_inode_rec(rec);
1061 if (src == &src_node->root_cache) {
1062 src = &src_node->inode_cache;
1063 dst = &dst_node->inode_cache;
1067 if (current_ino > 0 && (!dst_node->current ||
1068 current_ino > dst_node->current->ino)) {
1069 if (dst_node->current) {
1070 dst_node->current->checked = 1;
1071 maybe_free_inode_rec(dst, dst_node->current);
1073 dst_node->current = get_inode_rec(dst, current_ino, 1);
1078 static void free_inode_ptr(struct cache_extent *cache)
1080 struct ptr_node *node;
1081 struct inode_record *rec;
1083 node = container_of(cache, struct ptr_node, cache);
1085 free_inode_rec(rec);
1089 FREE_EXTENT_CACHE_BASED_TREE(inode_recs, free_inode_ptr);
1091 static struct shared_node *find_shared_node(struct cache_tree *shared,
1094 struct cache_extent *cache;
1095 struct shared_node *node;
1097 cache = lookup_cache_extent(shared, bytenr, 1);
1099 node = container_of(cache, struct shared_node, cache);
1105 static int add_shared_node(struct cache_tree *shared, u64 bytenr, u32 refs)
1108 struct shared_node *node;
1110 node = calloc(1, sizeof(*node));
1111 node->cache.start = bytenr;
1112 node->cache.size = 1;
1113 cache_tree_init(&node->root_cache);
1114 cache_tree_init(&node->inode_cache);
1117 ret = insert_cache_extent(shared, &node->cache);
1122 static int enter_shared_node(struct btrfs_root *root, u64 bytenr, u32 refs,
1123 struct walk_control *wc, int level)
1125 struct shared_node *node;
1126 struct shared_node *dest;
1128 if (level == wc->active_node)
1131 BUG_ON(wc->active_node <= level);
1132 node = find_shared_node(&wc->shared, bytenr);
1134 add_shared_node(&wc->shared, bytenr, refs);
1135 node = find_shared_node(&wc->shared, bytenr);
1136 wc->nodes[level] = node;
1137 wc->active_node = level;
1141 if (wc->root_level == wc->active_node &&
1142 btrfs_root_refs(&root->root_item) == 0) {
1143 if (--node->refs == 0) {
1144 free_inode_recs_tree(&node->root_cache);
1145 free_inode_recs_tree(&node->inode_cache);
1146 remove_cache_extent(&wc->shared, &node->cache);
1152 dest = wc->nodes[wc->active_node];
1153 splice_shared_node(node, dest);
1154 if (node->refs == 0) {
1155 remove_cache_extent(&wc->shared, &node->cache);
1161 static int leave_shared_node(struct btrfs_root *root,
1162 struct walk_control *wc, int level)
1164 struct shared_node *node;
1165 struct shared_node *dest;
1168 if (level == wc->root_level)
1171 for (i = level + 1; i < BTRFS_MAX_LEVEL; i++) {
1175 BUG_ON(i >= BTRFS_MAX_LEVEL);
1177 node = wc->nodes[wc->active_node];
1178 wc->nodes[wc->active_node] = NULL;
1179 wc->active_node = i;
1181 dest = wc->nodes[wc->active_node];
1182 if (wc->active_node < wc->root_level ||
1183 btrfs_root_refs(&root->root_item) > 0) {
1184 BUG_ON(node->refs <= 1);
1185 splice_shared_node(node, dest);
1187 BUG_ON(node->refs < 2);
1196 * 1 - if the root with id child_root_id is a child of root parent_root_id
1197 * 0 - if the root child_root_id isn't a child of the root parent_root_id but
1198 * has other root(s) as parent(s)
1199 * 2 - if the root child_root_id doesn't have any parent roots
1201 static int is_child_root(struct btrfs_root *root, u64 parent_root_id,
1204 struct btrfs_path path;
1205 struct btrfs_key key;
1206 struct extent_buffer *leaf;
1210 btrfs_init_path(&path);
1212 key.objectid = parent_root_id;
1213 key.type = BTRFS_ROOT_REF_KEY;
1214 key.offset = child_root_id;
1215 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
1219 btrfs_release_path(&path);
1223 key.objectid = child_root_id;
1224 key.type = BTRFS_ROOT_BACKREF_KEY;
1226 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
1232 leaf = path.nodes[0];
1233 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1234 ret = btrfs_next_leaf(root->fs_info->tree_root, &path);
1237 leaf = path.nodes[0];
1240 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1241 if (key.objectid != child_root_id ||
1242 key.type != BTRFS_ROOT_BACKREF_KEY)
1247 if (key.offset == parent_root_id) {
1248 btrfs_release_path(&path);
1255 btrfs_release_path(&path);
1258 return has_parent ? 0 : 2;
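/*
 * For example (hypothetical ids), a subvolume created directly below the
 * top-level tree should make is_child_root(root, BTRFS_FS_TREE_OBJECTID,
 * subvol_id) return 1, while a root without any ROOT_REF/ROOT_BACKREF items
 * yields 2.
 */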
1261 static int process_dir_item(struct btrfs_root *root,
1262 struct extent_buffer *eb,
1263 int slot, struct btrfs_key *key,
1264 struct shared_node *active_node)
1274 struct btrfs_dir_item *di;
1275 struct inode_record *rec;
1276 struct cache_tree *root_cache;
1277 struct cache_tree *inode_cache;
1278 struct btrfs_key location;
1279 char namebuf[BTRFS_NAME_LEN];
1281 root_cache = &active_node->root_cache;
1282 inode_cache = &active_node->inode_cache;
1283 rec = active_node->current;
1284 rec->found_dir_item = 1;
1286 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1287 total = btrfs_item_size_nr(eb, slot);
1288 while (cur < total) {
1290 btrfs_dir_item_key_to_cpu(eb, di, &location);
1291 name_len = btrfs_dir_name_len(eb, di);
1292 data_len = btrfs_dir_data_len(eb, di);
1293 filetype = btrfs_dir_type(eb, di);
1295 rec->found_size += name_len;
1296 if (name_len <= BTRFS_NAME_LEN) {
1300 len = BTRFS_NAME_LEN;
1301 error = REF_ERR_NAME_TOO_LONG;
1303 read_extent_buffer(eb, namebuf, (unsigned long)(di + 1), len);
1305 if (location.type == BTRFS_INODE_ITEM_KEY) {
1306 add_inode_backref(inode_cache, location.objectid,
1307 key->objectid, key->offset, namebuf,
1308 len, filetype, key->type, error);
1309 } else if (location.type == BTRFS_ROOT_ITEM_KEY) {
1310 add_inode_backref(root_cache, location.objectid,
1311 key->objectid, key->offset,
1312 namebuf, len, filetype,
1315 fprintf(stderr, "invalid location in dir item %u\n",
1317 add_inode_backref(inode_cache, BTRFS_MULTIPLE_OBJECTIDS,
1318 key->objectid, key->offset, namebuf,
1319 len, filetype, key->type, error);
1322 len = sizeof(*di) + name_len + data_len;
1323 di = (struct btrfs_dir_item *)((char *)di + len);
1326 if (key->type == BTRFS_DIR_INDEX_KEY && nritems > 1)
1327 rec->errors |= I_ERR_DUP_DIR_INDEX;
1332 static int process_inode_ref(struct extent_buffer *eb,
1333 int slot, struct btrfs_key *key,
1334 struct shared_node *active_node)
1342 struct cache_tree *inode_cache;
1343 struct btrfs_inode_ref *ref;
1344 char namebuf[BTRFS_NAME_LEN];
1346 inode_cache = &active_node->inode_cache;
1348 ref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1349 total = btrfs_item_size_nr(eb, slot);
1350 while (cur < total) {
1351 name_len = btrfs_inode_ref_name_len(eb, ref);
1352 index = btrfs_inode_ref_index(eb, ref);
1353 if (name_len <= BTRFS_NAME_LEN) {
1357 len = BTRFS_NAME_LEN;
1358 error = REF_ERR_NAME_TOO_LONG;
1360 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
1361 add_inode_backref(inode_cache, key->objectid, key->offset,
1362 index, namebuf, len, 0, key->type, error);
1364 len = sizeof(*ref) + name_len;
1365 ref = (struct btrfs_inode_ref *)((char *)ref + len);
1371 static int process_inode_extref(struct extent_buffer *eb,
1372 int slot, struct btrfs_key *key,
1373 struct shared_node *active_node)
1382 struct cache_tree *inode_cache;
1383 struct btrfs_inode_extref *extref;
1384 char namebuf[BTRFS_NAME_LEN];
1386 inode_cache = &active_node->inode_cache;
1388 extref = btrfs_item_ptr(eb, slot, struct btrfs_inode_extref);
1389 total = btrfs_item_size_nr(eb, slot);
1390 while (cur < total) {
1391 name_len = btrfs_inode_extref_name_len(eb, extref);
1392 index = btrfs_inode_extref_index(eb, extref);
1393 parent = btrfs_inode_extref_parent(eb, extref);
1394 if (name_len <= BTRFS_NAME_LEN) {
1398 len = BTRFS_NAME_LEN;
1399 error = REF_ERR_NAME_TOO_LONG;
1401 read_extent_buffer(eb, namebuf,
1402 (unsigned long)(extref + 1), len);
1403 add_inode_backref(inode_cache, key->objectid, parent,
1404 index, namebuf, len, 0, key->type, error);
1406 len = sizeof(*extref) + name_len;
1407 extref = (struct btrfs_inode_extref *)((char *)extref + len);
1414 static int count_csum_range(struct btrfs_root *root, u64 start,
1415 u64 len, u64 *found)
1417 struct btrfs_key key;
1418 struct btrfs_path path;
1419 struct extent_buffer *leaf;
1424 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
1426 btrfs_init_path(&path);
1428 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1430 key.type = BTRFS_EXTENT_CSUM_KEY;
1432 ret = btrfs_search_slot(NULL, root->fs_info->csum_root,
1436 if (ret > 0 && path.slots[0] > 0) {
1437 leaf = path.nodes[0];
1438 btrfs_item_key_to_cpu(leaf, &key, path.slots[0] - 1);
1439 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
1440 key.type == BTRFS_EXTENT_CSUM_KEY)
1445 leaf = path.nodes[0];
1446 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1447 ret = btrfs_next_leaf(root->fs_info->csum_root, &path);
1452 leaf = path.nodes[0];
1455 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1456 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
1457 key.type != BTRFS_EXTENT_CSUM_KEY)
1460 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1461 if (key.offset >= start + len)
1464 if (key.offset > start)
1467 size = btrfs_item_size_nr(leaf, path.slots[0]);
1468 csum_end = key.offset + (size / csum_size) * root->sectorsize;
1469 if (csum_end > start) {
1470 size = min(csum_end - start, len);
1479 btrfs_release_path(&path);
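/*
 * Each csum item covers (item_size / csum_size) sectors starting at
 * key.offset.  With the common crc32c csum size of 4 bytes and a 4KiB
 * sectorsize (example values), a 1024-byte csum item covers 256 sectors,
 * so csum_end = key.offset + 1MiB; only the part of that range overlapping
 * [start, start + len) is accounted in *found.
 */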
1485 static int process_file_extent(struct btrfs_root *root,
1486 struct extent_buffer *eb,
1487 int slot, struct btrfs_key *key,
1488 struct shared_node *active_node)
1490 struct inode_record *rec;
1491 struct btrfs_file_extent_item *fi;
1493 u64 disk_bytenr = 0;
1494 u64 extent_offset = 0;
1495 u64 mask = root->sectorsize - 1;
1499 rec = active_node->current;
1500 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
1501 rec->found_file_extent = 1;
1503 if (rec->extent_start == (u64)-1) {
1504 rec->extent_start = key->offset;
1505 rec->extent_end = key->offset;
1508 if (rec->extent_end > key->offset)
1509 rec->errors |= I_ERR_FILE_EXTENT_OVERLAP;
1510 else if (rec->extent_end < key->offset) {
1511 ret = add_file_extent_hole(&rec->holes, rec->extent_end,
1512 key->offset - rec->extent_end);
1517 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
1518 extent_type = btrfs_file_extent_type(eb, fi);
1520 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1521 num_bytes = btrfs_file_extent_inline_len(eb, slot, fi);
1523 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1524 rec->found_size += num_bytes;
1525 num_bytes = (num_bytes + mask) & ~mask;
1526 } else if (extent_type == BTRFS_FILE_EXTENT_REG ||
1527 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1528 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1529 disk_bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1530 extent_offset = btrfs_file_extent_offset(eb, fi);
1531 if (num_bytes == 0 || (num_bytes & mask))
1532 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1533 if (num_bytes + extent_offset >
1534 btrfs_file_extent_ram_bytes(eb, fi))
1535 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1536 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC &&
1537 (btrfs_file_extent_compression(eb, fi) ||
1538 btrfs_file_extent_encryption(eb, fi) ||
1539 btrfs_file_extent_other_encoding(eb, fi)))
1540 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1541 if (disk_bytenr > 0)
1542 rec->found_size += num_bytes;
1544 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1546 rec->extent_end = key->offset + num_bytes;
1549 * The data reloc tree will copy full extents into its inode and then
1550 * copy the corresponding csums. Because the extent it copied could be
1551 * a preallocated extent that hasn't been written to yet there may be no
1552 * csums to copy, ergo we won't have csums for our file extent. This is
1553 * ok so just don't bother checking csums if the inode belongs to the
1556 if (disk_bytenr > 0 &&
1557 btrfs_header_owner(eb) != BTRFS_DATA_RELOC_TREE_OBJECTID) {
1559 if (btrfs_file_extent_compression(eb, fi))
1560 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1562 disk_bytenr += extent_offset;
1564 ret = count_csum_range(root, disk_bytenr, num_bytes, &found);
1567 if (extent_type == BTRFS_FILE_EXTENT_REG) {
1569 rec->found_csum_item = 1;
1570 if (found < num_bytes)
1571 rec->some_csum_missing = 1;
1572 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1574 rec->errors |= I_ERR_ODD_CSUM_ITEM;
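/*
 * For instance (example offsets), a file with extents at [0, 4096) and
 * [8192, 12288) gets a 4KiB hole recorded at offset 4096 by the
 * add_file_extent_hole() call above; unless the no_holes feature is in use,
 * maybe_free_inode_rec() will later flag this as I_ERR_FILE_EXTENT_DISCOUNT
 * when the hole lies below the inode's isize.
 */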
1580 static int process_one_leaf(struct btrfs_root *root, struct extent_buffer *eb,
1581 struct walk_control *wc)
1583 struct btrfs_key key;
1587 struct cache_tree *inode_cache;
1588 struct shared_node *active_node;
1590 if (wc->root_level == wc->active_node &&
1591 btrfs_root_refs(&root->root_item) == 0)
1594 active_node = wc->nodes[wc->active_node];
1595 inode_cache = &active_node->inode_cache;
1596 nritems = btrfs_header_nritems(eb);
1597 for (i = 0; i < nritems; i++) {
1598 btrfs_item_key_to_cpu(eb, &key, i);
1600 if (key.objectid == BTRFS_FREE_SPACE_OBJECTID)
1602 if (key.type == BTRFS_ORPHAN_ITEM_KEY)
1605 if (active_node->current == NULL ||
1606 active_node->current->ino < key.objectid) {
1607 if (active_node->current) {
1608 active_node->current->checked = 1;
1609 maybe_free_inode_rec(inode_cache,
1610 active_node->current);
1612 active_node->current = get_inode_rec(inode_cache,
1616 case BTRFS_DIR_ITEM_KEY:
1617 case BTRFS_DIR_INDEX_KEY:
1618 ret = process_dir_item(root, eb, i, &key, active_node);
1620 case BTRFS_INODE_REF_KEY:
1621 ret = process_inode_ref(eb, i, &key, active_node);
1623 case BTRFS_INODE_EXTREF_KEY:
1624 ret = process_inode_extref(eb, i, &key, active_node);
1626 case BTRFS_INODE_ITEM_KEY:
1627 ret = process_inode_item(eb, i, &key, active_node);
1629 case BTRFS_EXTENT_DATA_KEY:
1630 ret = process_file_extent(root, eb, i, &key,
1640 static void reada_walk_down(struct btrfs_root *root,
1641 struct extent_buffer *node, int slot)
1650 level = btrfs_header_level(node);
1654 nritems = btrfs_header_nritems(node);
1655 blocksize = btrfs_level_size(root, level - 1);
1656 for (i = slot; i < nritems; i++) {
1657 bytenr = btrfs_node_blockptr(node, i);
1658 ptr_gen = btrfs_node_ptr_generation(node, i);
1659 readahead_tree_block(root, bytenr, blocksize, ptr_gen);
1664 * Check the child node/leaf against the following conditions:
1665 * 1. the first item key of the node/leaf should be the same as the one
1667 * 2. the block pointer in the parent node should match the child node/leaf.
1668 * 3. the generation in the parent node and in the child's header should be consistent.
1670 * Otherwise the child node/leaf pointed to by the key in the parent is not valid.
1672 * We would like to check the leaf owner too, but since subvolumes may share leaves,
1673 * that check is not very strong, so the key check should be
1674 * sufficient for that case.
1676 static int check_child_node(struct btrfs_root *root,
1677 struct extent_buffer *parent, int slot,
1678 struct extent_buffer *child)
1680 struct btrfs_key parent_key;
1681 struct btrfs_key child_key;
1684 btrfs_node_key_to_cpu(parent, &parent_key, slot);
1685 if (btrfs_header_level(child) == 0)
1686 btrfs_item_key_to_cpu(child, &child_key, 0);
1688 btrfs_node_key_to_cpu(child, &child_key, 0);
1690 if (memcmp(&parent_key, &child_key, sizeof(parent_key))) {
1693 "Wrong key of child node/leaf, wanted: (%llu, %u, %llu), have: (%llu, %u, %llu)\n",
1694 parent_key.objectid, parent_key.type, parent_key.offset,
1695 child_key.objectid, child_key.type, child_key.offset);
1697 if (btrfs_header_bytenr(child) != btrfs_node_blockptr(parent, slot)) {
1699 fprintf(stderr, "Wrong block of child node/leaf, wanted: %llu, have: %llu\n",
1700 btrfs_node_blockptr(parent, slot),
1701 btrfs_header_bytenr(child));
1703 if (btrfs_node_ptr_generation(parent, slot) !=
1704 btrfs_header_generation(child)) {
1706 fprintf(stderr, "Wrong generation of child node/leaf, wanted: %llu, have: %llu\n",
1707 btrfs_header_generation(child),
1708 btrfs_node_ptr_generation(parent, slot));
1713 static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
1714 struct walk_control *wc, int *level)
1716 enum btrfs_tree_block_status status;
1719 struct extent_buffer *next;
1720 struct extent_buffer *cur;
1725 WARN_ON(*level < 0);
1726 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1727 ret = btrfs_lookup_extent_info(NULL, root,
1728 path->nodes[*level]->start,
1729 *level, 1, &refs, NULL);
1736 ret = enter_shared_node(root, path->nodes[*level]->start,
1744 while (*level >= 0) {
1745 WARN_ON(*level < 0);
1746 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1747 cur = path->nodes[*level];
1749 if (btrfs_header_level(cur) != *level)
1752 if (path->slots[*level] >= btrfs_header_nritems(cur))
1755 ret = process_one_leaf(root, cur, wc);
1760 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1761 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
1762 blocksize = btrfs_level_size(root, *level - 1);
1763 ret = btrfs_lookup_extent_info(NULL, root, bytenr, *level - 1,
1769 ret = enter_shared_node(root, bytenr, refs,
1772 path->slots[*level]++;
1777 next = btrfs_find_tree_block(root, bytenr, blocksize);
1778 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
1779 free_extent_buffer(next);
1780 reada_walk_down(root, cur, path->slots[*level]);
1781 next = read_tree_block(root, bytenr, blocksize,
1783 if (!extent_buffer_uptodate(next)) {
1784 struct btrfs_key node_key;
1786 btrfs_node_key_to_cpu(path->nodes[*level],
1788 path->slots[*level]);
1789 btrfs_add_corrupt_extent_record(root->fs_info,
1791 path->nodes[*level]->start,
1792 root->leafsize, *level);
1798 ret = check_child_node(root, cur, path->slots[*level], next);
1804 if (btrfs_is_leaf(next))
1805 status = btrfs_check_leaf(root, NULL, next);
1807 status = btrfs_check_node(root, NULL, next);
1808 if (status != BTRFS_TREE_BLOCK_CLEAN) {
1809 free_extent_buffer(next);
1814 *level = *level - 1;
1815 free_extent_buffer(path->nodes[*level]);
1816 path->nodes[*level] = next;
1817 path->slots[*level] = 0;
1820 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
1824 static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
1825 struct walk_control *wc, int *level)
1828 struct extent_buffer *leaf;
1830 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1831 leaf = path->nodes[i];
1832 if (path->slots[i] + 1 < btrfs_header_nritems(leaf)) {
1837 free_extent_buffer(path->nodes[*level]);
1838 path->nodes[*level] = NULL;
1839 BUG_ON(*level > wc->active_node);
1840 if (*level == wc->active_node)
1841 leave_shared_node(root, wc, *level);
1848 static int check_root_dir(struct inode_record *rec)
1850 struct inode_backref *backref;
1853 if (!rec->found_inode_item || rec->errors)
1855 if (rec->nlink != 1 || rec->found_link != 0)
1857 if (list_empty(&rec->backrefs))
1859 backref = list_entry(rec->backrefs.next, struct inode_backref, list);
1860 if (!backref->found_inode_ref)
1862 if (backref->index != 0 || backref->namelen != 2 ||
1863 memcmp(backref->name, "..", 2))
1865 if (backref->found_dir_index || backref->found_dir_item)
1872 static int repair_inode_isize(struct btrfs_trans_handle *trans,
1873 struct btrfs_root *root, struct btrfs_path *path,
1874 struct inode_record *rec)
1876 struct btrfs_inode_item *ei;
1877 struct btrfs_key key;
1880 key.objectid = rec->ino;
1881 key.type = BTRFS_INODE_ITEM_KEY;
1882 key.offset = (u64)-1;
1884 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1888 if (!path->slots[0]) {
1895 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1896 if (key.objectid != rec->ino) {
1901 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1902 struct btrfs_inode_item);
1903 btrfs_set_inode_size(path->nodes[0], ei, rec->found_size);
1904 btrfs_mark_buffer_dirty(path->nodes[0]);
1905 rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
1906 printf("reset isize for dir %Lu root %Lu\n", rec->ino,
1907 root->root_key.objectid);
1909 btrfs_release_path(path);
1913 static int repair_inode_orphan_item(struct btrfs_trans_handle *trans,
1914 struct btrfs_root *root,
1915 struct btrfs_path *path,
1916 struct inode_record *rec)
1920 ret = btrfs_add_orphan_item(trans, root, path, rec->ino);
1921 btrfs_release_path(path);
1923 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
1927 static int repair_inode_nbytes(struct btrfs_trans_handle *trans,
1928 struct btrfs_root *root,
1929 struct btrfs_path *path,
1930 struct inode_record *rec)
1932 struct btrfs_inode_item *ei;
1933 struct btrfs_key key;
1936 key.objectid = rec->ino;
1937 key.type = BTRFS_INODE_ITEM_KEY;
1940 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1947 /* Since ret == 0, no need to check anything */
1948 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1949 struct btrfs_inode_item);
1950 btrfs_set_inode_nbytes(path->nodes[0], ei, rec->found_size);
1951 btrfs_mark_buffer_dirty(path->nodes[0]);
1952 rec->errors &= ~I_ERR_FILE_NBYTES_WRONG;
1953 printf("reset nbytes for ino %llu root %llu\n",
1954 rec->ino, root->root_key.objectid);
1956 btrfs_release_path(path);
1960 static int add_missing_dir_index(struct btrfs_root *root,
1961 struct cache_tree *inode_cache,
1962 struct inode_record *rec,
1963 struct inode_backref *backref)
1965 struct btrfs_path *path;
1966 struct btrfs_trans_handle *trans;
1967 struct btrfs_dir_item *dir_item;
1968 struct extent_buffer *leaf;
1969 struct btrfs_key key;
1970 struct btrfs_disk_key disk_key;
1971 struct inode_record *dir_rec;
1972 unsigned long name_ptr;
1973 u32 data_size = sizeof(*dir_item) + backref->namelen;
1976 path = btrfs_alloc_path();
1980 trans = btrfs_start_transaction(root, 1);
1981 if (IS_ERR(trans)) {
1982 btrfs_free_path(path);
1983 return PTR_ERR(trans);
1986 fprintf(stderr, "repairing missing dir index item for inode %llu\n",
1987 (unsigned long long)rec->ino);
1988 key.objectid = backref->dir;
1989 key.type = BTRFS_DIR_INDEX_KEY;
1990 key.offset = backref->index;
1992 ret = btrfs_insert_empty_item(trans, root, path, &key, data_size);
1995 leaf = path->nodes[0];
1996 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
1998 disk_key.objectid = cpu_to_le64(rec->ino);
1999 disk_key.type = BTRFS_INODE_ITEM_KEY;
2000 disk_key.offset = 0;
2002 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
2003 btrfs_set_dir_type(leaf, dir_item, imode_to_type(rec->imode));
2004 btrfs_set_dir_data_len(leaf, dir_item, 0);
2005 btrfs_set_dir_name_len(leaf, dir_item, backref->namelen);
2006 name_ptr = (unsigned long)(dir_item + 1);
2007 write_extent_buffer(leaf, backref->name, name_ptr, backref->namelen);
2008 btrfs_mark_buffer_dirty(leaf);
2009 btrfs_free_path(path);
2010 btrfs_commit_transaction(trans, root);
2012 backref->found_dir_index = 1;
2013 dir_rec = get_inode_rec(inode_cache, backref->dir, 0);
2016 dir_rec->found_size += backref->namelen;
2017 if (dir_rec->found_size == dir_rec->isize &&
2018 (dir_rec->errors & I_ERR_DIR_ISIZE_WRONG))
2019 dir_rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
2020 if (dir_rec->found_size != dir_rec->isize)
2021 dir_rec->errors |= I_ERR_DIR_ISIZE_WRONG;
2026 static int delete_dir_index(struct btrfs_root *root,
2027 struct cache_tree *inode_cache,
2028 struct inode_record *rec,
2029 struct inode_backref *backref)
2031 struct btrfs_trans_handle *trans;
2032 struct btrfs_dir_item *di;
2033 struct btrfs_path *path;
2036 path = btrfs_alloc_path();
2040 trans = btrfs_start_transaction(root, 1);
2041 if (IS_ERR(trans)) {
2042 btrfs_free_path(path);
2043 return PTR_ERR(trans);
2047 fprintf(stderr, "Deleting bad dir index [%llu,%u,%llu] root %llu\n",
2048 (unsigned long long)backref->dir,
2049 BTRFS_DIR_INDEX_KEY, (unsigned long long)backref->index,
2050 (unsigned long long)root->objectid);
2052 di = btrfs_lookup_dir_index(trans, root, path, backref->dir,
2053 backref->name, backref->namelen,
2054 backref->index, -1);
2057 btrfs_free_path(path);
2058 btrfs_commit_transaction(trans, root);
2065 ret = btrfs_del_item(trans, root, path);
2067 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2069 btrfs_free_path(path);
2070 btrfs_commit_transaction(trans, root);
2074 static int create_inode_item(struct btrfs_root *root,
2075 struct inode_record *rec,
2076 struct inode_backref *backref, int root_dir)
2078 struct btrfs_trans_handle *trans;
2079 struct btrfs_inode_item inode_item;
2080 time_t now = time(NULL);
2083 trans = btrfs_start_transaction(root, 1);
2084 if (IS_ERR(trans)) {
2085 ret = PTR_ERR(trans);
2089 fprintf(stderr, "root %llu inode %llu recreating inode item, this may "
2090 "be incomplete, please check permissions and content after "
2091 "the fsck completes.\n", (unsigned long long)root->objectid,
2092 (unsigned long long)rec->ino);
2094 memset(&inode_item, 0, sizeof(inode_item));
2095 btrfs_set_stack_inode_generation(&inode_item, trans->transid);
2097 btrfs_set_stack_inode_nlink(&inode_item, 1);
2099 btrfs_set_stack_inode_nlink(&inode_item, rec->found_link);
2100 btrfs_set_stack_inode_nbytes(&inode_item, rec->found_size);
2101 if (rec->found_dir_item) {
2102 if (rec->found_file_extent)
2103 fprintf(stderr, "root %llu inode %llu has both a dir "
2104 "item and extents, unsure if it is a dir or a "
2105 "regular file so setting it as a directory\n",
2106 (unsigned long long)root->objectid,
2107 (unsigned long long)rec->ino);
2108 btrfs_set_stack_inode_mode(&inode_item, S_IFDIR | 0755);
2109 btrfs_set_stack_inode_size(&inode_item, rec->found_size);
2110 } else if (!rec->found_dir_item) {
2111 btrfs_set_stack_inode_size(&inode_item, rec->extent_end);
2112 btrfs_set_stack_inode_mode(&inode_item, S_IFREG | 0755);
2114 btrfs_set_stack_timespec_sec(&inode_item.atime, now);
2115 btrfs_set_stack_timespec_nsec(&inode_item.atime, 0);
2116 btrfs_set_stack_timespec_sec(&inode_item.ctime, now);
2117 btrfs_set_stack_timespec_nsec(&inode_item.ctime, 0);
2118 btrfs_set_stack_timespec_sec(&inode_item.mtime, now);
2119 btrfs_set_stack_timespec_nsec(&inode_item.mtime, 0);
2120 btrfs_set_stack_timespec_sec(&inode_item.otime, 0);
2121 btrfs_set_stack_timespec_nsec(&inode_item.otime, 0);
2123 ret = btrfs_insert_inode(trans, root, rec->ino, &inode_item);
2125 btrfs_commit_transaction(trans, root);
2129 static int repair_inode_backrefs(struct btrfs_root *root,
2130 struct inode_record *rec,
2131 struct cache_tree *inode_cache,
2134 struct inode_backref *tmp, *backref;
2135 u64 root_dirid = btrfs_root_dirid(&root->root_item);
2139 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
2140 if (!delete && rec->ino == root_dirid) {
2141 if (!rec->found_inode_item) {
2142 ret = create_inode_item(root, rec, backref, 1);
2149 /* Index 0 for root dir's are special, don't mess with it */
2150 if (rec->ino == root_dirid && backref->index == 0)
2154 ((backref->found_dir_index && !backref->found_inode_ref) ||
2155 (backref->found_dir_index && backref->found_inode_ref &&
2156 (backref->errors & REF_ERR_INDEX_UNMATCH)))) {
2157 ret = delete_dir_index(root, inode_cache, rec, backref);
2161 list_del(&backref->list);
2165 if (!delete && !backref->found_dir_index &&
2166 backref->found_dir_item && backref->found_inode_ref) {
2167 ret = add_missing_dir_index(root, inode_cache, rec,
2172 if (backref->found_dir_item &&
2173 backref->found_dir_index &&
2174 backref->found_dir_index) {
2175 if (!backref->errors &&
2176 backref->found_inode_ref) {
2177 list_del(&backref->list);
2183 if (!delete && (!backref->found_dir_index &&
2184 !backref->found_dir_item &&
2185 backref->found_inode_ref)) {
2186 struct btrfs_trans_handle *trans;
2187 struct btrfs_key location;
2189 ret = check_dir_conflict(root, backref->name,
2195 * let the nlink fixing routine handle it,
2196 * since it can do a better job.
2201 location.objectid = rec->ino;
2202 location.type = BTRFS_INODE_ITEM_KEY;
2203 location.offset = 0;
2205 trans = btrfs_start_transaction(root, 1);
2206 if (IS_ERR(trans)) {
2207 ret = PTR_ERR(trans);
2210 fprintf(stderr, "adding missing dir index/item pair "
2212 (unsigned long long)rec->ino);
2213 ret = btrfs_insert_dir_item(trans, root, backref->name,
2215 backref->dir, &location,
2216 imode_to_type(rec->imode),
2219 btrfs_commit_transaction(trans, root);
2223 if (!delete && (backref->found_inode_ref &&
2224 backref->found_dir_index &&
2225 backref->found_dir_item &&
2226 !(backref->errors & REF_ERR_INDEX_UNMATCH) &&
2227 !rec->found_inode_item)) {
2228 ret = create_inode_item(root, rec, backref, 0);
2235 return ret ? ret : repaired;
2239 * To determine the file type for nlink/inode_item repair
2241 * Return 0 if file type is found and BTRFS_FT_* is stored into type.
2242 * Return -ENOENT if file type is not found.
2244 static int find_file_type(struct inode_record *rec, u8 *type)
2246 struct inode_backref *backref;
2248 /* For inode item recovered case */
2249 if (rec->found_inode_item) {
2250 *type = imode_to_type(rec->imode);
2254 list_for_each_entry(backref, &rec->backrefs, list) {
2255 if (backref->found_dir_index || backref->found_dir_item) {
2256 *type = backref->filetype;
2264 * To determine the file name for nlink repair
2266 * Return 0 if file name is found, set name and namelen.
2267 * Return -ENOENT if file name is not found.
2269 static int find_file_name(struct inode_record *rec,
2270 char *name, int *namelen)
2272 struct inode_backref *backref;
2274 list_for_each_entry(backref, &rec->backrefs, list) {
2275 if (backref->found_dir_index || backref->found_dir_item ||
2276 backref->found_inode_ref) {
2277 memcpy(name, backref->name, backref->namelen);
2278 *namelen = backref->namelen;
2285 /* Reset the nlink of the inode to the correct one */
2286 static int reset_nlink(struct btrfs_trans_handle *trans,
2287 struct btrfs_root *root,
2288 struct btrfs_path *path,
2289 struct inode_record *rec)
2291 struct inode_backref *backref;
2292 struct inode_backref *tmp;
2293 struct btrfs_key key;
2294 struct btrfs_inode_item *inode_item;
2297 /* We don't trust the current value either, so reset it and iterate over the backrefs */
2298 rec->found_link = 0;
2300 /* Remove all backrefs, including the valid ones */
2301 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
2302 ret = btrfs_unlink(trans, root, rec->ino, backref->dir,
2303 backref->index, backref->name,
2304 backref->namelen, 0);
2308 /* remove the invalid backref so it won't be added back */
2309 if (!(backref->found_dir_index &&
2310 backref->found_dir_item &&
2311 backref->found_inode_ref)) {
2312 list_del(&backref->list);
2319 /* Set nlink to 0 */
2320 key.objectid = rec->ino;
2321 key.type = BTRFS_INODE_ITEM_KEY;
2323 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2330 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2331 struct btrfs_inode_item);
2332 btrfs_set_inode_nlink(path->nodes[0], inode_item, 0);
2333 btrfs_mark_buffer_dirty(path->nodes[0]);
2334 btrfs_release_path(path);
2337 * Add back the valid inode_ref/dir_item/dir_index entries;
2338 * add_link() will handle the nlink increment, so the new nlink will be correct
2340 list_for_each_entry(backref, &rec->backrefs, list) {
2341 ret = btrfs_add_link(trans, root, rec->ino, backref->dir,
2342 backref->name, backref->namelen,
2343 backref->ref_type, &backref->index, 1);
2348 btrfs_release_path(path);
2352 static int repair_inode_nlinks(struct btrfs_trans_handle *trans,
2353 struct btrfs_root *root,
2354 struct btrfs_path *path,
2355 struct inode_record *rec)
2357 char *dir_name = "lost+found";
2358 char namebuf[BTRFS_NAME_LEN] = {0};
2363 int name_recovered = 0;
2364 int type_recovered = 0;
2368 * Get the file name and type first, before these invalid inode refs
2369 * are deleted by remove_all_invalid_backref()
2371 name_recovered = !find_file_name(rec, namebuf, &namelen);
2372 type_recovered = !find_file_type(rec, &type);
2374 if (!name_recovered) {
2375 printf("Can't get file name for inode %llu, using '%llu' as fallback\n",
2376 rec->ino, rec->ino);
2377 namelen = count_digits(rec->ino);
2378 sprintf(namebuf, "%llu", rec->ino);
2381 if (!type_recovered) {
2382 printf("Can't get file type for inode %llu, using FILE as fallback\n",
2384 type = BTRFS_FT_REG_FILE;
2388 ret = reset_nlink(trans, root, path, rec);
2391 "Failed to reset nlink for inode %llu: %s\n",
2392 rec->ino, strerror(-ret));
2396 if (rec->found_link == 0) {
2397 lost_found_ino = root->highest_inode;
2398 if (lost_found_ino >= BTRFS_LAST_FREE_OBJECTID) {
2403 ret = btrfs_mkdir(trans, root, dir_name, strlen(dir_name),
2404 BTRFS_FIRST_FREE_OBJECTID, &lost_found_ino,
2407 fprintf(stderr, "Failed to create '%s' dir: %s\n",
2408 dir_name, strerror(-ret));
2411 ret = btrfs_add_link(trans, root, rec->ino, lost_found_ino,
2412 namebuf, namelen, type, NULL, 1);
2414 * Add ".INO" suffix several times to handle case where
2415 * "FILENAME.INO" is already taken by another file.
2417 while (ret == -EEXIST) {
2419 * Conflicting file name, add ".INO" as suffix (the +1 accounts for the '.')
2421 if (namelen + count_digits(rec->ino) + 1 >
2426 snprintf(namebuf + namelen, BTRFS_NAME_LEN - namelen,
2428 namelen += count_digits(rec->ino) + 1;
2429 ret = btrfs_add_link(trans, root, rec->ino,
2430 lost_found_ino, namebuf,
2431 namelen, type, NULL, 1);
2435 "Failed to link the inode %llu to %s dir: %s\n",
2436 rec->ino, dir_name, strerror(-ret));
2440 * Just increase the found_link, don't actually add the
2441 * backref. This will make things easier and this inode
2442 * record will be freed after the repair is done.
2443 * So fsck will not report a problem about this inode.
2446 printf("Moving file '%.*s' to '%s' dir since it has no valid backref\n",
2447 namelen, namebuf, dir_name);
2449 printf("Fixed the nlink of inode %llu\n", rec->ino);
2452 * Clear the flag anyway, or we will loop forever on the same inode,
2453 * as it will not be removed from the bad inode list, which leads to a dead loop.
2456 rec->errors &= ~I_ERR_LINK_COUNT_WRONG;
2457 btrfs_release_path(path);
2462 * Check if there is any normal (reg or prealloc) file extent for the given inode.
2464 * This is used to determine the file type when neither its dir_index/item nor
2465 * inode_item exists.
2467 * This will *NOT* report an error; if any error happens, just consider the inode
2468 * to have no normal file extent.
2470 static int find_normal_file_extent(struct btrfs_root *root, u64 ino)
2472 struct btrfs_path *path;
2473 struct btrfs_key key;
2474 struct btrfs_key found_key;
2475 struct btrfs_file_extent_item *fi;
2479 path = btrfs_alloc_path();
2483 key.type = BTRFS_EXTENT_DATA_KEY;
2486 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2491 if (ret && path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2492 ret = btrfs_next_leaf(root, path);
2499 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2501 if (found_key.objectid != ino ||
2502 found_key.type != BTRFS_EXTENT_DATA_KEY)
2504 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
2505 struct btrfs_file_extent_item);
2506 type = btrfs_file_extent_type(path->nodes[0], fi);
2507 if (type != BTRFS_FILE_EXTENT_INLINE) {
2513 btrfs_free_path(path);
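/* Map a BTRFS_FT_* dir entry type to the matching S_IF* mode bits */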
2517 static u32 btrfs_type_to_imode(u8 type)
2519 static u32 imode_by_btrfs_type[] = {
2520 [BTRFS_FT_REG_FILE] = S_IFREG,
2521 [BTRFS_FT_DIR] = S_IFDIR,
2522 [BTRFS_FT_CHRDEV] = S_IFCHR,
2523 [BTRFS_FT_BLKDEV] = S_IFBLK,
2524 [BTRFS_FT_FIFO] = S_IFIFO,
2525 [BTRFS_FT_SOCK] = S_IFSOCK,
2526 [BTRFS_FT_SYMLINK] = S_IFLNK,
2529 return imode_by_btrfs_type[(type)];
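/*
 * Rebuild a missing inode item.  The file type is taken from the backrefs
 * when possible, otherwise guessed from what was found (file extents ->
 * regular file, dir items -> dir), with a regular file as the last resort.
 */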
2532 static int repair_inode_no_item(struct btrfs_trans_handle *trans,
2533 struct btrfs_root *root,
2534 struct btrfs_path *path,
2535 struct inode_record *rec)
2539 int type_recovered = 0;
2542 printf("Trying to rebuild inode: %llu\n", rec->ino);
2544 type_recovered = !find_file_type(rec, &filetype);
2547 * Try to determine the inode type if the type was not found.
2549 * If a regular file extent is found, it must be FILE.
2550 * If a dir_item/index is found, it must be DIR.
2552 * For an undetermined one, use FILE as fallback.
2555 * 1. If a backref to it is found (inode_index/item is already handled),
2557 *    it must be DIR.  Need a new inode-inode ref structure to allow searching for that.
2559 if (!type_recovered) {
2560 if (rec->found_file_extent &&
2561 find_normal_file_extent(root, rec->ino)) {
2563 filetype = BTRFS_FT_REG_FILE;
2564 } else if (rec->found_dir_item) {
2566 filetype = BTRFS_FT_DIR;
2567 } else if (!list_empty(&rec->orphan_extents)) {
2569 filetype = BTRFS_FT_REG_FILE;
2571 printf("Can't determine the filetype for inode %llu, assume it is a normal file\n",
2574 filetype = BTRFS_FT_REG_FILE;
2578 ret = btrfs_new_inode(trans, root, rec->ino,
2579 mode | btrfs_type_to_imode(filetype));
2584 * Here the inode rebuild is done; we only rebuild the inode item and
2585 * don't repair the nlink (like moving to lost+found).
2586 * That is the job of the nlink repair.
2588 * We just fill the record and return.
2590 rec->found_dir_item = 1;
2591 rec->imode = mode | btrfs_type_to_imode(filetype);
2593 rec->errors &= ~I_ERR_NO_INODE_ITEM;
2594 /* Ensure the inode_nlinks repair function will be called */
2595 rec->errors |= I_ERR_LINK_COUNT_WRONG;
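/*
 * Re-insert orphan data extents as file extent items of the inode.
 * Extents that conflict with existing file extents are freed instead of
 * being inserted.
 */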
2600 static int repair_inode_orphan_extent(struct btrfs_trans_handle *trans,
2601 struct btrfs_root *root,
2602 struct btrfs_path *path,
2603 struct inode_record *rec)
2605 struct orphan_data_extent *orphan;
2606 struct orphan_data_extent *tmp;
2609 list_for_each_entry_safe(orphan, tmp, &rec->orphan_extents, list) {
2611 * Check for conflicting file extents.
2613 * Here we don't know whether the extent is compressed or not,
2614 * so we can only assume it is not compressed and has no data offset,
2615 * and use its disk_len as the extent length.
2617 ret = btrfs_get_extent(NULL, root, path, orphan->objectid,
2618 orphan->offset, orphan->disk_len, 0);
2619 btrfs_release_path(path);
2624 "orphan extent (%llu, %llu) conflicts, delete the orphan\n",
2625 orphan->disk_bytenr, orphan->disk_len);
2626 ret = btrfs_free_extent(trans,
2627 root->fs_info->extent_root,
2628 orphan->disk_bytenr, orphan->disk_len,
2629 0, root->objectid, orphan->objectid,
2634 ret = btrfs_insert_file_extent(trans, root, orphan->objectid,
2635 orphan->offset, orphan->disk_bytenr,
2636 orphan->disk_len, orphan->disk_len);
2640 /* Update file size info */
2641 rec->found_size += orphan->disk_len;
2642 if (rec->found_size == rec->nbytes)
2643 rec->errors &= ~I_ERR_FILE_NBYTES_WRONG;
2645 /* Update the file extent hole info too */
2646 ret = del_file_extent_hole(&rec->holes, orphan->offset,
2650 if (RB_EMPTY_ROOT(&rec->holes))
2651 rec->errors &= ~I_ERR_FILE_EXTENT_DISCOUNT;
2653 list_del(&orphan->list);
2656 rec->errors &= ~I_ERR_FILE_EXTENT_ORPHAN;
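/*
 * Punch a hole for every gap recorded in rec->holes so the file extent
 * items cover the whole inode size again.
 */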
2661 static int repair_inode_discount_extent(struct btrfs_trans_handle *trans,
2662 struct btrfs_root *root,
2663 struct btrfs_path *path,
2664 struct inode_record *rec)
2666 struct rb_node *node;
2667 struct file_extent_hole *hole;
2671 node = rb_first(&rec->holes);
2675 hole = rb_entry(node, struct file_extent_hole, node);
2676 ret = btrfs_punch_hole(trans, root, rec->ino,
2677 hole->start, hole->len);
2680 ret = del_file_extent_hole(&rec->holes, hole->start,
2684 if (RB_EMPTY_ROOT(&rec->holes))
2685 rec->errors &= ~I_ERR_FILE_EXTENT_DISCOUNT;
2686 node = rb_first(&rec->holes);
2688 /* Special case for a file losing all its file extents */
2690 ret = btrfs_punch_hole(trans, root, rec->ino, 0,
2691 round_up(rec->isize, root->sectorsize));
2695 printf("Fixed discount file extents for inode: %llu in root: %llu\n",
2696 rec->ino, root->objectid);
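/*
 * Dispatch the per-inode repair helpers according to the error bits set in
 * the inode record.  A missing inode item is rebuilt first, so the later
 * extent, isize and nlink repairs have something to work on.
 */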
2701 static int try_repair_inode(struct btrfs_root *root, struct inode_record *rec)
2703 struct btrfs_trans_handle *trans;
2704 struct btrfs_path *path;
2707 if (!(rec->errors & (I_ERR_DIR_ISIZE_WRONG |
2708 I_ERR_NO_ORPHAN_ITEM |
2709 I_ERR_LINK_COUNT_WRONG |
2710 I_ERR_NO_INODE_ITEM |
2711 I_ERR_FILE_EXTENT_ORPHAN |
2712 I_ERR_FILE_EXTENT_DISCOUNT|
2713 I_ERR_FILE_NBYTES_WRONG)))
2716 path = btrfs_alloc_path();
2721 * For the nlink repair, it may create a dir and add links, so reserve:
2722 * 2 for parent(256)'s dir_index and dir_item
2723 * 2 for lost+found dir's inode_item and inode_ref
2724 * 1 for the new inode_ref of the file
2725 * 2 for lost+found dir's dir_index and dir_item for the file
2727 trans = btrfs_start_transaction(root, 7);
2728 if (IS_ERR(trans)) {
2729 btrfs_free_path(path);
2730 return PTR_ERR(trans);
2733 if (rec->errors & I_ERR_NO_INODE_ITEM)
2734 ret = repair_inode_no_item(trans, root, path, rec);
2735 if (!ret && rec->errors & I_ERR_FILE_EXTENT_ORPHAN)
2736 ret = repair_inode_orphan_extent(trans, root, path, rec);
2737 if (!ret && rec->errors & I_ERR_FILE_EXTENT_DISCOUNT)
2738 ret = repair_inode_discount_extent(trans, root, path, rec);
2739 if (!ret && rec->errors & I_ERR_DIR_ISIZE_WRONG)
2740 ret = repair_inode_isize(trans, root, path, rec);
2741 if (!ret && rec->errors & I_ERR_NO_ORPHAN_ITEM)
2742 ret = repair_inode_orphan_item(trans, root, path, rec);
2743 if (!ret && rec->errors & I_ERR_LINK_COUNT_WRONG)
2744 ret = repair_inode_nlinks(trans, root, path, rec);
2745 if (!ret && rec->errors & I_ERR_FILE_NBYTES_WRONG)
2746 ret = repair_inode_nbytes(trans, root, path, rec);
2747 btrfs_commit_transaction(trans, root);
2748 btrfs_free_path(path);
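/*
 * Verify all inode records collected for this root: repair the backrefs
 * first, check (and optionally recreate) the root directory, then walk the
 * remaining records, flag missing inode items and wrong link counts, and
 * try to repair them when repair mode is enabled.
 */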
2752 static int check_inode_recs(struct btrfs_root *root,
2753 struct cache_tree *inode_cache)
2755 struct cache_extent *cache;
2756 struct ptr_node *node;
2757 struct inode_record *rec;
2758 struct inode_backref *backref;
2763 u64 root_dirid = btrfs_root_dirid(&root->root_item);
2765 if (btrfs_root_refs(&root->root_item) == 0) {
2766 if (!cache_tree_empty(inode_cache))
2767 fprintf(stderr, "warning line %d\n", __LINE__);
2772 * We need to record the highest inode number for the later 'lost+found' dir creation.
2774 * We must select an ino not used/referred to by any existing inode, or
2775 * the 'lost+found' ino may be a missing ino in a corrupted leaf,
2776 * which may cause the 'lost+found' dir to have wrong nlinks.
2778 cache = last_cache_extent(inode_cache);
2780 node = container_of(cache, struct ptr_node, cache);
2782 if (rec->ino > root->highest_inode)
2783 root->highest_inode = rec->ino;
2787 * We need to repair backrefs first because we could change some of the
2788 * errors in the inode recs.
2790 * We also need to go through and delete invalid backrefs first and then
2791 * add the correct ones second.  We do this because we may get EEXIST
2792 * when adding back the correct index because we hadn't yet deleted the invalid one.
2795 * For example, if we were missing a dir index then the directory's
2796 * isize would be wrong, so if we fixed the isize to what we thought it
2797 * should be and then fixed the backref we'd still have an invalid fs, so
2798 * we need to add back the dir index and then check to see if the isize is still wrong.
2803 if (stage == 3 && !err)
2806 cache = search_cache_extent(inode_cache, 0);
2807 while (repair && cache) {
2808 node = container_of(cache, struct ptr_node, cache);
2810 cache = next_cache_extent(cache);
2812 /* Need to free everything up and rescan */
2814 remove_cache_extent(inode_cache, &node->cache);
2816 free_inode_rec(rec);
2820 if (list_empty(&rec->backrefs))
2823 ret = repair_inode_backrefs(root, rec, inode_cache,
2837 rec = get_inode_rec(inode_cache, root_dirid, 0);
2839 ret = check_root_dir(rec);
2841 fprintf(stderr, "root %llu root dir %llu error\n",
2842 (unsigned long long)root->root_key.objectid,
2843 (unsigned long long)root_dirid);
2844 print_inode_error(root, rec);
2849 struct btrfs_trans_handle *trans;
2851 trans = btrfs_start_transaction(root, 1);
2852 if (IS_ERR(trans)) {
2853 err = PTR_ERR(trans);
2858 "root %llu missing its root dir, recreating\n",
2859 (unsigned long long)root->objectid);
2861 ret = btrfs_make_root_dir(trans, root, root_dirid);
2864 btrfs_commit_transaction(trans, root);
2868 fprintf(stderr, "root %llu root dir %llu not found\n",
2869 (unsigned long long)root->root_key.objectid,
2870 (unsigned long long)root_dirid);
2874 cache = search_cache_extent(inode_cache, 0);
2877 node = container_of(cache, struct ptr_node, cache);
2879 remove_cache_extent(inode_cache, &node->cache);
2881 if (rec->ino == root_dirid ||
2882 rec->ino == BTRFS_ORPHAN_OBJECTID) {
2883 free_inode_rec(rec);
2887 if (rec->errors & I_ERR_NO_ORPHAN_ITEM) {
2888 ret = check_orphan_item(root, rec->ino);
2890 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
2891 if (can_free_inode_rec(rec)) {
2892 free_inode_rec(rec);
2897 if (!rec->found_inode_item)
2898 rec->errors |= I_ERR_NO_INODE_ITEM;
2899 if (rec->found_link != rec->nlink)
2900 rec->errors |= I_ERR_LINK_COUNT_WRONG;
2902 ret = try_repair_inode(root, rec);
2903 if (ret == 0 && can_free_inode_rec(rec)) {
2904 free_inode_rec(rec);
2910 if (!(repair && ret == 0))
2912 print_inode_error(root, rec);
2913 list_for_each_entry(backref, &rec->backrefs, list) {
2914 if (!backref->found_dir_item)
2915 backref->errors |= REF_ERR_NO_DIR_ITEM;
2916 if (!backref->found_dir_index)
2917 backref->errors |= REF_ERR_NO_DIR_INDEX;
2918 if (!backref->found_inode_ref)
2919 backref->errors |= REF_ERR_NO_INODE_REF;
2920 fprintf(stderr, "\tunresolved ref dir %llu index %llu"
2921 " namelen %u name %s filetype %d errors %x",
2922 (unsigned long long)backref->dir,
2923 (unsigned long long)backref->index,
2924 backref->namelen, backref->name,
2925 backref->filetype, backref->errors);
2926 print_ref_error(backref->errors);
2928 free_inode_rec(rec);
2930 return (error > 0) ? -1 : 0;
2933 static struct root_record *get_root_rec(struct cache_tree *root_cache,
2936 struct cache_extent *cache;
2937 struct root_record *rec = NULL;
2940 cache = lookup_cache_extent(root_cache, objectid, 1);
2942 rec = container_of(cache, struct root_record, cache);
2944 rec = calloc(1, sizeof(*rec));
2945 rec->objectid = objectid;
2946 INIT_LIST_HEAD(&rec->backrefs);
2947 rec->cache.start = objectid;
2948 rec->cache.size = 1;
2950 ret = insert_cache_extent(root_cache, &rec->cache);
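/*
 * Find the root backref matching (ref_root, dir, name); allocate and link a
 * new zeroed one if it does not exist yet.
 */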
2956 static struct root_backref *get_root_backref(struct root_record *rec,
2957 u64 ref_root, u64 dir, u64 index,
2958 const char *name, int namelen)
2960 struct root_backref *backref;
2962 list_for_each_entry(backref, &rec->backrefs, list) {
2963 if (backref->ref_root != ref_root || backref->dir != dir ||
2964 backref->namelen != namelen)
2966 if (memcmp(name, backref->name, namelen))
2971 backref = malloc(sizeof(*backref) + namelen + 1);
2972 memset(backref, 0, sizeof(*backref));
2973 backref->ref_root = ref_root;
2975 backref->index = index;
2976 backref->namelen = namelen;
2977 memcpy(backref->name, name, namelen);
2978 backref->name[namelen] = '\0';
2979 list_add_tail(&backref->list, &rec->backrefs);
2983 static void free_root_record(struct cache_extent *cache)
2985 struct root_record *rec;
2986 struct root_backref *backref;
2988 rec = container_of(cache, struct root_record, cache);
2989 while (!list_empty(&rec->backrefs)) {
2990 backref = list_entry(rec->backrefs.next,
2991 struct root_backref, list);
2992 list_del(&backref->list);
2999 FREE_EXTENT_CACHE_BASED_TREE(root_recs, free_root_record);
3001 static int add_root_backref(struct cache_tree *root_cache,
3002 u64 root_id, u64 ref_root, u64 dir, u64 index,
3003 const char *name, int namelen,
3004 int item_type, int errors)
3006 struct root_record *rec;
3007 struct root_backref *backref;
3009 rec = get_root_rec(root_cache, root_id);
3010 backref = get_root_backref(rec, ref_root, dir, index, name, namelen);
3012 backref->errors |= errors;
3014 if (item_type != BTRFS_DIR_ITEM_KEY) {
3015 if (backref->found_dir_index || backref->found_back_ref ||
3016 backref->found_forward_ref) {
3017 if (backref->index != index)
3018 backref->errors |= REF_ERR_INDEX_UNMATCH;
3020 backref->index = index;
3024 if (item_type == BTRFS_DIR_ITEM_KEY) {
3025 if (backref->found_forward_ref)
3027 backref->found_dir_item = 1;
3028 } else if (item_type == BTRFS_DIR_INDEX_KEY) {
3029 backref->found_dir_index = 1;
3030 } else if (item_type == BTRFS_ROOT_REF_KEY) {
3031 if (backref->found_forward_ref)
3032 backref->errors |= REF_ERR_DUP_ROOT_REF;
3033 else if (backref->found_dir_item)
3035 backref->found_forward_ref = 1;
3036 } else if (item_type == BTRFS_ROOT_BACKREF_KEY) {
3037 if (backref->found_back_ref)
3038 backref->errors |= REF_ERR_DUP_ROOT_BACKREF;
3039 backref->found_back_ref = 1;
3044 if (backref->found_forward_ref && backref->found_dir_item)
3045 backref->reachable = 1;
3049 static int merge_root_recs(struct btrfs_root *root,
3050 struct cache_tree *src_cache,
3051 struct cache_tree *dst_cache)
3053 struct cache_extent *cache;
3054 struct ptr_node *node;
3055 struct inode_record *rec;
3056 struct inode_backref *backref;
3059 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
3060 free_inode_recs_tree(src_cache);
3065 cache = search_cache_extent(src_cache, 0);
3068 node = container_of(cache, struct ptr_node, cache);
3070 remove_cache_extent(src_cache, &node->cache);
3073 ret = is_child_root(root, root->objectid, rec->ino);
3079 list_for_each_entry(backref, &rec->backrefs, list) {
3080 BUG_ON(backref->found_inode_ref);
3081 if (backref->found_dir_item)
3082 add_root_backref(dst_cache, rec->ino,
3083 root->root_key.objectid, backref->dir,
3084 backref->index, backref->name,
3085 backref->namelen, BTRFS_DIR_ITEM_KEY,
3087 if (backref->found_dir_index)
3088 add_root_backref(dst_cache, rec->ino,
3089 root->root_key.objectid, backref->dir,
3090 backref->index, backref->name,
3091 backref->namelen, BTRFS_DIR_INDEX_KEY,
3095 free_inode_rec(rec);
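/*
 * Starting from the fs tree root, repeatedly drop the reachability of
 * backrefs whose referencing root is itself unreferenced, then report
 * subvolumes that end up unreferenced or have broken root refs/backrefs.
 */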
3102 static int check_root_refs(struct btrfs_root *root,
3103 struct cache_tree *root_cache)
3105 struct root_record *rec;
3106 struct root_record *ref_root;
3107 struct root_backref *backref;
3108 struct cache_extent *cache;
3114 rec = get_root_rec(root_cache, BTRFS_FS_TREE_OBJECTID);
3117 /* fixme: this cannot detect circular references */
3120 cache = search_cache_extent(root_cache, 0);
3124 rec = container_of(cache, struct root_record, cache);
3125 cache = next_cache_extent(cache);
3127 if (rec->found_ref == 0)
3130 list_for_each_entry(backref, &rec->backrefs, list) {
3131 if (!backref->reachable)
3134 ref_root = get_root_rec(root_cache,
3136 if (ref_root->found_ref > 0)
3139 backref->reachable = 0;
3141 if (rec->found_ref == 0)
3147 cache = search_cache_extent(root_cache, 0);
3151 rec = container_of(cache, struct root_record, cache);
3152 cache = next_cache_extent(cache);
3154 if (rec->found_ref == 0 &&
3155 rec->objectid >= BTRFS_FIRST_FREE_OBJECTID &&
3156 rec->objectid <= BTRFS_LAST_FREE_OBJECTID) {
3157 ret = check_orphan_item(root->fs_info->tree_root,
3163 * If we don't have a root item then we likely just have
3164 * a dir item in a snapshot for this root but no actual
3165 * ref key or anything so it's meaningless.
3167 if (!rec->found_root_item)
3170 fprintf(stderr, "fs tree %llu not referenced\n",
3171 (unsigned long long)rec->objectid);
3175 if (rec->found_ref > 0 && !rec->found_root_item)
3177 list_for_each_entry(backref, &rec->backrefs, list) {
3178 if (!backref->found_dir_item)
3179 backref->errors |= REF_ERR_NO_DIR_ITEM;
3180 if (!backref->found_dir_index)
3181 backref->errors |= REF_ERR_NO_DIR_INDEX;
3182 if (!backref->found_back_ref)
3183 backref->errors |= REF_ERR_NO_ROOT_BACKREF;
3184 if (!backref->found_forward_ref)
3185 backref->errors |= REF_ERR_NO_ROOT_REF;
3186 if (backref->reachable && backref->errors)
3193 fprintf(stderr, "fs tree %llu refs %u %s\n",
3194 (unsigned long long)rec->objectid, rec->found_ref,
3195 rec->found_root_item ? "" : "not found");
3197 list_for_each_entry(backref, &rec->backrefs, list) {
3198 if (!backref->reachable)
3200 if (!backref->errors && rec->found_root_item)
3202 fprintf(stderr, "\tunresolved ref root %llu dir %llu"
3203 " index %llu namelen %u name %s errors %x\n",
3204 (unsigned long long)backref->ref_root,
3205 (unsigned long long)backref->dir,
3206 (unsigned long long)backref->index,
3207 backref->namelen, backref->name,
3209 print_ref_error(backref->errors);
3212 return errors > 0 ? 1 : 0;
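/*
 * Turn a ROOT_REF/ROOT_BACKREF item into root backref records, truncating
 * over-long names and flagging them with REF_ERR_NAME_TOO_LONG.
 */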
3215 static int process_root_ref(struct extent_buffer *eb, int slot,
3216 struct btrfs_key *key,
3217 struct cache_tree *root_cache)
3223 struct btrfs_root_ref *ref;
3224 char namebuf[BTRFS_NAME_LEN];
3227 ref = btrfs_item_ptr(eb, slot, struct btrfs_root_ref);
3229 dirid = btrfs_root_ref_dirid(eb, ref);
3230 index = btrfs_root_ref_sequence(eb, ref);
3231 name_len = btrfs_root_ref_name_len(eb, ref);
3233 if (name_len <= BTRFS_NAME_LEN) {
3237 len = BTRFS_NAME_LEN;
3238 error = REF_ERR_NAME_TOO_LONG;
3240 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
3242 if (key->type == BTRFS_ROOT_REF_KEY) {
3243 add_root_backref(root_cache, key->offset, key->objectid, dirid,
3244 index, namebuf, len, key->type, error);
3246 add_root_backref(root_cache, key->objectid, key->offset, dirid,
3247 index, namebuf, len, key->type, error);
3252 static void free_corrupt_block(struct cache_extent *cache)
3254 struct btrfs_corrupt_block *corrupt;
3256 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
3260 FREE_EXTENT_CACHE_BASED_TREE(corrupt_blocks, free_corrupt_block);
3263 * Repair the btree of the given root.
3265 * The fix is to remove the node keys recorded in the corrupt_blocks cache_tree
3266 * and rebalance the tree.
3267 * After the fix, the btree should be writeable.
3269 static int repair_btree(struct btrfs_root *root,
3270 struct cache_tree *corrupt_blocks)
3272 struct btrfs_trans_handle *trans;
3273 struct btrfs_path *path;
3274 struct btrfs_corrupt_block *corrupt;
3275 struct cache_extent *cache;
3276 struct btrfs_key key;
3281 if (cache_tree_empty(corrupt_blocks))
3284 path = btrfs_alloc_path();
3288 trans = btrfs_start_transaction(root, 1);
3289 if (IS_ERR(trans)) {
3290 ret = PTR_ERR(trans);
3291 fprintf(stderr, "Error starting transaction: %s\n",
3295 cache = first_cache_extent(corrupt_blocks);
3297 corrupt = container_of(cache, struct btrfs_corrupt_block,
3299 level = corrupt->level;
3300 path->lowest_level = level;
3301 key.objectid = corrupt->key.objectid;
3302 key.type = corrupt->key.type;
3303 key.offset = corrupt->key.offset;
3306 * Here we don't want to do any tree balance, since it may
3307 * cause a balance with a corrupted brother leaf/node,
3308 * so ins_len is set to 0 here.
3309 * Balancing will be done after all corrupt nodes/leaves are deleted.
3311 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3314 offset = btrfs_node_blockptr(path->nodes[level],
3315 path->slots[level]);
3317 /* Remove the ptr */
3318 ret = btrfs_del_ptr(trans, root, path, level,
3319 path->slots[level]);
3323 * Remove the corresponding extent;
3324 * the return value is not checked.
3326 btrfs_release_path(path);
3327 ret = btrfs_free_extent(trans, root, offset, root->nodesize,
3328 0, root->root_key.objectid,
3330 cache = next_cache_extent(cache);
3333 /* Balance the btree using btrfs_search_slot() */
3334 cache = first_cache_extent(corrupt_blocks);
3336 corrupt = container_of(cache, struct btrfs_corrupt_block,
3338 memcpy(&key, &corrupt->key, sizeof(key));
3339 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3342 /* the return value will always be > 0 since it won't find the item */
3344 btrfs_release_path(path);
3345 cache = next_cache_extent(cache);
3348 btrfs_commit_transaction(trans, root);
3350 btrfs_free_path(path);
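/*
 * Check one fs/subvolume tree: record its root item, move the pending
 * orphan data extents to their inode records, walk the tree blocks, try to
 * repair any corrupted blocks that were found, and finally check the
 * collected inode records.
 */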
3354 static int check_fs_root(struct btrfs_root *root,
3355 struct cache_tree *root_cache,
3356 struct walk_control *wc)
3362 struct btrfs_path path;
3363 struct shared_node root_node;
3364 struct root_record *rec;
3365 struct btrfs_root_item *root_item = &root->root_item;
3366 struct cache_tree corrupt_blocks;
3367 struct orphan_data_extent *orphan;
3368 struct orphan_data_extent *tmp;
3369 enum btrfs_tree_block_status status;
3372 * Reuse the corrupt_block cache tree to record corrupted tree blocks.
3374 * Unlike its usage in the extent tree check, here we do it on a per
3375 * fs/subvol tree basis.
3377 cache_tree_init(&corrupt_blocks);
3378 root->fs_info->corrupt_blocks = &corrupt_blocks;
3380 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
3381 rec = get_root_rec(root_cache, root->root_key.objectid);
3382 if (btrfs_root_refs(root_item) > 0)
3383 rec->found_root_item = 1;
3386 btrfs_init_path(&path);
3387 memset(&root_node, 0, sizeof(root_node));
3388 cache_tree_init(&root_node.root_cache);
3389 cache_tree_init(&root_node.inode_cache);
3391 /* Move the orphan extent records to the corresponding inode_record */
3392 list_for_each_entry_safe(orphan, tmp,
3393 &root->orphan_data_extents, list) {
3394 struct inode_record *inode;
3396 inode = get_inode_rec(&root_node.inode_cache, orphan->objectid,
3398 inode->errors |= I_ERR_FILE_EXTENT_ORPHAN;
3399 list_move(&orphan->list, &inode->orphan_extents);
3402 level = btrfs_header_level(root->node);
3403 memset(wc->nodes, 0, sizeof(wc->nodes));
3404 wc->nodes[level] = &root_node;
3405 wc->active_node = level;
3406 wc->root_level = level;
3408 /* We may not have checked the root block, let's do that now */
3409 if (btrfs_is_leaf(root->node))
3410 status = btrfs_check_leaf(root, NULL, root->node);
3412 status = btrfs_check_node(root, NULL, root->node);
3413 if (status != BTRFS_TREE_BLOCK_CLEAN)
3416 if (btrfs_root_refs(root_item) > 0 ||
3417 btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3418 path.nodes[level] = root->node;
3419 extent_buffer_get(root->node);
3420 path.slots[level] = 0;
3422 struct btrfs_key key;
3423 struct btrfs_disk_key found_key;
3425 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3426 level = root_item->drop_level;
3427 path.lowest_level = level;
3428 wret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
3431 btrfs_node_key(path.nodes[level], &found_key,
3433 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3434 sizeof(found_key)));
3438 wret = walk_down_tree(root, &path, wc, &level);
3444 wret = walk_up_tree(root, &path, wc, &level);
3451 btrfs_release_path(&path);
3453 if (!cache_tree_empty(&corrupt_blocks)) {
3454 struct cache_extent *cache;
3455 struct btrfs_corrupt_block *corrupt;
3457 printf("The following tree block(s) are corrupted in tree %llu:\n",
3458 root->root_key.objectid);
3459 cache = first_cache_extent(&corrupt_blocks);
3461 corrupt = container_of(cache,
3462 struct btrfs_corrupt_block,
3464 printf("\ttree block bytenr: %llu, level: %d, node key: (%llu, %u, %llu)\n",
3465 cache->start, corrupt->level,
3466 corrupt->key.objectid, corrupt->key.type,
3467 corrupt->key.offset);
3468 cache = next_cache_extent(cache);
3471 printf("Try to repair the btree for root %llu\n",
3472 root->root_key.objectid);
3473 ret = repair_btree(root, &corrupt_blocks);
3475 fprintf(stderr, "Failed to repair btree: %s\n",
3478 printf("Btree for root %llu is fixed\n",
3479 root->root_key.objectid);
3483 err = merge_root_recs(root, &root_node.root_cache, root_cache);
3487 if (root_node.current) {
3488 root_node.current->checked = 1;
3489 maybe_free_inode_rec(&root_node.inode_cache,
3493 err = check_inode_recs(root, &root_node.inode_cache);
3497 free_corrupt_blocks_tree(&corrupt_blocks);
3498 root->fs_info->corrupt_blocks = NULL;
3499 free_orphan_data_extents(&root->orphan_data_extents);
3503 static int fs_root_objectid(u64 objectid)
3505 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
3506 objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3508 return is_fstree(objectid);
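/*
 * Iterate over all root items in the tree root and run check_fs_root() on
 * every fs/subvolume tree, restarting from scratch when the tree root is
 * COWed underneath us or a repair returns -EAGAIN.
 */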
3511 static int check_fs_roots(struct btrfs_root *root,
3512 struct cache_tree *root_cache)
3514 struct btrfs_path path;
3515 struct btrfs_key key;
3516 struct walk_control wc;
3517 struct extent_buffer *leaf, *tree_node;
3518 struct btrfs_root *tmp_root;
3519 struct btrfs_root *tree_root = root->fs_info->tree_root;
3524 * Just in case we made any changes to the extent tree that weren't
3525 * reflected into the free space cache yet.
3528 reset_cached_block_groups(root->fs_info);
3529 memset(&wc, 0, sizeof(wc));
3530 cache_tree_init(&wc.shared);
3531 btrfs_init_path(&path);
3536 key.type = BTRFS_ROOT_ITEM_KEY;
3537 ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
3542 tree_node = tree_root->node;
3544 if (tree_node != tree_root->node) {
3545 free_root_recs_tree(root_cache);
3546 btrfs_release_path(&path);
3549 leaf = path.nodes[0];
3550 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
3551 ret = btrfs_next_leaf(tree_root, &path);
3557 leaf = path.nodes[0];
3559 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
3560 if (key.type == BTRFS_ROOT_ITEM_KEY &&
3561 fs_root_objectid(key.objectid)) {
3562 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
3563 tmp_root = btrfs_read_fs_root_no_cache(
3564 root->fs_info, &key);
3566 key.offset = (u64)-1;
3567 tmp_root = btrfs_read_fs_root(
3568 root->fs_info, &key);
3570 if (IS_ERR(tmp_root)) {
3574 ret = check_fs_root(tmp_root, root_cache, &wc);
3575 if (ret == -EAGAIN) {
3576 free_root_recs_tree(root_cache);
3577 btrfs_release_path(&path);
3582 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
3583 btrfs_free_fs_root(tmp_root);
3584 } else if (key.type == BTRFS_ROOT_REF_KEY ||
3585 key.type == BTRFS_ROOT_BACKREF_KEY) {
3586 process_root_ref(leaf, path.slots[0], &key,
3593 btrfs_release_path(&path);
3595 free_extent_cache_tree(&wc.shared);
3596 if (!cache_tree_empty(&wc.shared))
3597 fprintf(stderr, "warning line %d\n", __LINE__);
3602 static int all_backpointers_checked(struct extent_record *rec, int print_errs)
3604 struct list_head *cur = rec->backrefs.next;
3605 struct extent_backref *back;
3606 struct tree_backref *tback;
3607 struct data_backref *dback;
3611 while(cur != &rec->backrefs) {
3612 back = list_entry(cur, struct extent_backref, list);
3614 if (!back->found_extent_tree) {
3618 if (back->is_data) {
3619 dback = (struct data_backref *)back;
3620 fprintf(stderr, "Backref %llu %s %llu"
3621 " owner %llu offset %llu num_refs %lu"
3622 " not found in extent tree\n",
3623 (unsigned long long)rec->start,
3624 back->full_backref ?
3626 back->full_backref ?
3627 (unsigned long long)dback->parent:
3628 (unsigned long long)dback->root,
3629 (unsigned long long)dback->owner,
3630 (unsigned long long)dback->offset,
3631 (unsigned long)dback->num_refs);
3633 tback = (struct tree_backref *)back;
3634 fprintf(stderr, "Backref %llu parent %llu"
3635 " root %llu not found in extent tree\n",
3636 (unsigned long long)rec->start,
3637 (unsigned long long)tback->parent,
3638 (unsigned long long)tback->root);
3641 if (!back->is_data && !back->found_ref) {
3645 tback = (struct tree_backref *)back;
3646 fprintf(stderr, "Backref %llu %s %llu not referenced back %p\n",
3647 (unsigned long long)rec->start,
3648 back->full_backref ? "parent" : "root",
3649 back->full_backref ?
3650 (unsigned long long)tback->parent :
3651 (unsigned long long)tback->root, back);
3653 if (back->is_data) {
3654 dback = (struct data_backref *)back;
3655 if (dback->found_ref != dback->num_refs) {
3659 fprintf(stderr, "Incorrect local backref count"
3660 " on %llu %s %llu owner %llu"
3661 " offset %llu found %u wanted %u back %p\n",
3662 (unsigned long long)rec->start,
3663 back->full_backref ?
3665 back->full_backref ?
3666 (unsigned long long)dback->parent:
3667 (unsigned long long)dback->root,
3668 (unsigned long long)dback->owner,
3669 (unsigned long long)dback->offset,
3670 dback->found_ref, dback->num_refs, back);
3672 if (dback->disk_bytenr != rec->start) {
3676 fprintf(stderr, "Backref disk bytenr does not"
3677 " match extent record, bytenr=%llu, "
3678 "ref bytenr=%llu\n",
3679 (unsigned long long)rec->start,
3680 (unsigned long long)dback->disk_bytenr);
3683 if (dback->bytes != rec->nr) {
3687 fprintf(stderr, "Backref bytes do not match "
3688 "extent backref, bytenr=%llu, ref "
3689 "bytes=%llu, backref bytes=%llu\n",
3690 (unsigned long long)rec->start,
3691 (unsigned long long)rec->nr,
3692 (unsigned long long)dback->bytes);
3695 if (!back->is_data) {
3698 dback = (struct data_backref *)back;
3699 found += dback->found_ref;
3702 if (found != rec->refs) {
3706 fprintf(stderr, "Incorrect global backref count "
3707 "on %llu found %llu wanted %llu\n",
3708 (unsigned long long)rec->start,
3709 (unsigned long long)found,
3710 (unsigned long long)rec->refs);
3716 static int free_all_extent_backrefs(struct extent_record *rec)
3718 struct extent_backref *back;
3719 struct list_head *cur;
3720 while (!list_empty(&rec->backrefs)) {
3721 cur = rec->backrefs.next;
3722 back = list_entry(cur, struct extent_backref, list);
3729 static void free_extent_record_cache(struct btrfs_fs_info *fs_info,
3730 struct cache_tree *extent_cache)
3732 struct cache_extent *cache;
3733 struct extent_record *rec;
3736 cache = first_cache_extent(extent_cache);
3739 rec = container_of(cache, struct extent_record, cache);
3740 remove_cache_extent(extent_cache, cache);
3741 free_all_extent_backrefs(rec);
3746 static int maybe_free_extent_rec(struct cache_tree *extent_cache,
3747 struct extent_record *rec)
3749 if (rec->content_checked && rec->owner_ref_checked &&
3750 rec->extent_item_refs == rec->refs && rec->refs > 0 &&
3751 rec->num_duplicates == 0 && !all_backpointers_checked(rec, 0) &&
3752 !rec->bad_full_backref && !rec->crossing_stripes) {
3753 remove_cache_extent(extent_cache, &rec->cache);
3754 free_all_extent_backrefs(rec);
3755 list_del_init(&rec->list);
3761 static int check_owner_ref(struct btrfs_root *root,
3762 struct extent_record *rec,
3763 struct extent_buffer *buf)
3765 struct extent_backref *node;
3766 struct tree_backref *back;
3767 struct btrfs_root *ref_root;
3768 struct btrfs_key key;
3769 struct btrfs_path path;
3770 struct extent_buffer *parent;
3775 list_for_each_entry(node, &rec->backrefs, list) {
3778 if (!node->found_ref)
3780 if (node->full_backref)
3782 back = (struct tree_backref *)node;
3783 if (btrfs_header_owner(buf) == back->root)
3786 BUG_ON(rec->is_root);
3788 /* try to find the block by searching the corresponding fs tree */
3789 key.objectid = btrfs_header_owner(buf);
3790 key.type = BTRFS_ROOT_ITEM_KEY;
3791 key.offset = (u64)-1;
3793 ref_root = btrfs_read_fs_root(root->fs_info, &key);
3794 if (IS_ERR(ref_root))
3797 level = btrfs_header_level(buf);
3799 btrfs_item_key_to_cpu(buf, &key, 0);
3801 btrfs_node_key_to_cpu(buf, &key, 0);
3803 btrfs_init_path(&path);
3804 path.lowest_level = level + 1;
3805 ret = btrfs_search_slot(NULL, ref_root, &key, &path, 0, 0);
3809 parent = path.nodes[level + 1];
3810 if (parent && buf->start == btrfs_node_blockptr(parent,
3811 path.slots[level + 1]))
3814 btrfs_release_path(&path);
3815 return found ? 0 : 1;
3818 static int is_extent_tree_record(struct extent_record *rec)
3820 struct list_head *cur = rec->backrefs.next;
3821 struct extent_backref *node;
3822 struct tree_backref *back;
3825 while(cur != &rec->backrefs) {
3826 node = list_entry(cur, struct extent_backref, list);
3830 back = (struct tree_backref *)node;
3831 if (node->full_backref)
3833 if (back->root == BTRFS_EXTENT_TREE_OBJECTID)
3840 static int record_bad_block_io(struct btrfs_fs_info *info,
3841 struct cache_tree *extent_cache,
3844 struct extent_record *rec;
3845 struct cache_extent *cache;
3846 struct btrfs_key key;
3848 cache = lookup_cache_extent(extent_cache, start, len);
3852 rec = container_of(cache, struct extent_record, cache);
3853 if (!is_extent_tree_record(rec))
3856 btrfs_disk_key_to_cpu(&key, &rec->parent_key);
3857 return btrfs_add_corrupt_extent_record(info, &key, start, len, 0);
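/*
 * Swap the keys (and, for leaves, the item data) of two adjacent slots in a
 * block.  Used by fix_key_order() to sort out-of-order keys.
 */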
3860 static int swap_values(struct btrfs_root *root, struct btrfs_path *path,
3861 struct extent_buffer *buf, int slot)
3863 if (btrfs_header_level(buf)) {
3864 struct btrfs_key_ptr ptr1, ptr2;
3866 read_extent_buffer(buf, &ptr1, btrfs_node_key_ptr_offset(slot),
3867 sizeof(struct btrfs_key_ptr));
3868 read_extent_buffer(buf, &ptr2,
3869 btrfs_node_key_ptr_offset(slot + 1),
3870 sizeof(struct btrfs_key_ptr));
3871 write_extent_buffer(buf, &ptr1,
3872 btrfs_node_key_ptr_offset(slot + 1),
3873 sizeof(struct btrfs_key_ptr));
3874 write_extent_buffer(buf, &ptr2,
3875 btrfs_node_key_ptr_offset(slot),
3876 sizeof(struct btrfs_key_ptr));
3878 struct btrfs_disk_key key;
3879 btrfs_node_key(buf, &key, 0);
3880 btrfs_fixup_low_keys(root, path, &key,
3881 btrfs_header_level(buf) + 1);
3884 struct btrfs_item *item1, *item2;
3885 struct btrfs_key k1, k2;
3886 char *item1_data, *item2_data;
3887 u32 item1_offset, item2_offset, item1_size, item2_size;
3889 item1 = btrfs_item_nr(slot);
3890 item2 = btrfs_item_nr(slot + 1);
3891 btrfs_item_key_to_cpu(buf, &k1, slot);
3892 btrfs_item_key_to_cpu(buf, &k2, slot + 1);
3893 item1_offset = btrfs_item_offset(buf, item1);
3894 item2_offset = btrfs_item_offset(buf, item2);
3895 item1_size = btrfs_item_size(buf, item1);
3896 item2_size = btrfs_item_size(buf, item2);
3898 item1_data = malloc(item1_size);
3901 item2_data = malloc(item2_size);
3907 read_extent_buffer(buf, item1_data, item1_offset, item1_size);
3908 read_extent_buffer(buf, item2_data, item2_offset, item2_size);
3910 write_extent_buffer(buf, item1_data, item2_offset, item2_size);
3911 write_extent_buffer(buf, item2_data, item1_offset, item1_size);
3915 btrfs_set_item_offset(buf, item1, item2_offset);
3916 btrfs_set_item_offset(buf, item2, item1_offset);
3917 btrfs_set_item_size(buf, item1, item2_size);
3918 btrfs_set_item_size(buf, item2, item1_size);
3920 path->slots[0] = slot;
3921 btrfs_set_item_key_unsafe(root, path, &k2);
3922 path->slots[0] = slot + 1;
3923 btrfs_set_item_key_unsafe(root, path, &k1);
3928 static int fix_key_order(struct btrfs_trans_handle *trans,
3929 struct btrfs_root *root,
3930 struct btrfs_path *path)
3932 struct extent_buffer *buf;
3933 struct btrfs_key k1, k2;
3935 int level = path->lowest_level;
3938 buf = path->nodes[level];
3939 for (i = 0; i < btrfs_header_nritems(buf) - 1; i++) {
3941 btrfs_node_key_to_cpu(buf, &k1, i);
3942 btrfs_node_key_to_cpu(buf, &k2, i + 1);
3944 btrfs_item_key_to_cpu(buf, &k1, i);
3945 btrfs_item_key_to_cpu(buf, &k2, i + 1);
3947 if (btrfs_comp_cpu_keys(&k1, &k2) < 0)
3949 ret = swap_values(root, path, buf, i);
3952 btrfs_mark_buffer_dirty(buf);
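/*
 * Drop an item whose offset/size cannot be fixed up.  Only item types that
 * the rest of fsck can cope with missing are deleted; anything else is left
 * alone.
 */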
3958 static int delete_bogus_item(struct btrfs_trans_handle *trans,
3959 struct btrfs_root *root,
3960 struct btrfs_path *path,
3961 struct extent_buffer *buf, int slot)
3963 struct btrfs_key key;
3964 int nritems = btrfs_header_nritems(buf);
3966 btrfs_item_key_to_cpu(buf, &key, slot);
3968 /* These are all the keys we can deal with missing. */
3969 if (key.type != BTRFS_DIR_INDEX_KEY &&
3970 key.type != BTRFS_EXTENT_ITEM_KEY &&
3971 key.type != BTRFS_METADATA_ITEM_KEY &&
3972 key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3973 key.type != BTRFS_EXTENT_DATA_REF_KEY)
3976 printf("Deleting bogus item [%llu,%u,%llu] at slot %d on block %llu\n",
3977 (unsigned long long)key.objectid, key.type,
3978 (unsigned long long)key.offset, slot, buf->start);
3979 memmove_extent_buffer(buf, btrfs_item_nr_offset(slot),
3980 btrfs_item_nr_offset(slot + 1),
3981 sizeof(struct btrfs_item) *
3982 (nritems - slot - 1));
3983 btrfs_set_header_nritems(buf, nritems - 1);
3985 struct btrfs_disk_key disk_key;
3987 btrfs_item_key(buf, &disk_key, 0);
3988 btrfs_fixup_low_keys(root, path, &disk_key, 1);
3990 btrfs_mark_buffer_dirty(buf);
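/*
 * Walk the items of a leaf and close up gaps or overlaps in the item data
 * offsets, deleting bogus items whose data runs past the end of the leaf or
 * into the previous item.
 */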
3994 static int fix_item_offset(struct btrfs_trans_handle *trans,
3995 struct btrfs_root *root,
3996 struct btrfs_path *path)
3998 struct extent_buffer *buf;
4002 /* We should only get this for leaves */
4003 BUG_ON(path->lowest_level);
4004 buf = path->nodes[0];
4006 for (i = 0; i < btrfs_header_nritems(buf); i++) {
4007 unsigned int shift = 0, offset;
4009 if (i == 0 && btrfs_item_end_nr(buf, i) !=
4010 BTRFS_LEAF_DATA_SIZE(root)) {
4011 if (btrfs_item_end_nr(buf, i) >
4012 BTRFS_LEAF_DATA_SIZE(root)) {
4013 ret = delete_bogus_item(trans, root, path,
4017 fprintf(stderr, "item is off the end of the "
4018 "leaf, can't fix\n");
4022 shift = BTRFS_LEAF_DATA_SIZE(root) -
4023 btrfs_item_end_nr(buf, i);
4024 } else if (i > 0 && btrfs_item_end_nr(buf, i) !=
4025 btrfs_item_offset_nr(buf, i - 1)) {
4026 if (btrfs_item_end_nr(buf, i) >
4027 btrfs_item_offset_nr(buf, i - 1)) {
4028 ret = delete_bogus_item(trans, root, path,
4032 fprintf(stderr, "items overlap, can't fix\n");
4036 shift = btrfs_item_offset_nr(buf, i - 1) -
4037 btrfs_item_end_nr(buf, i);
4042 printf("Shifting item nr %d by %u bytes in block %llu\n",
4043 i, shift, (unsigned long long)buf->start);
4044 offset = btrfs_item_offset_nr(buf, i);
4045 memmove_extent_buffer(buf,
4046 btrfs_leaf_data(buf) + offset + shift,
4047 btrfs_leaf_data(buf) + offset,
4048 btrfs_item_size_nr(buf, i));
4049 btrfs_set_item_offset(buf, btrfs_item_nr(i),
4051 btrfs_mark_buffer_dirty(buf);
4055 * We may have moved things, in which case we want to exit so we don't
4056 * write those changes out. Once we have proper abort functionality in
4057 * progs this can be changed to something nicer.
4064 * Attempt to fix basic block failures. If we can't fix it for whatever reason
4065 * then just return -EIO.
4067 static int try_to_fix_bad_block(struct btrfs_root *root,
4068 struct extent_buffer *buf,
4069 enum btrfs_tree_block_status status)
4071 struct btrfs_trans_handle *trans;
4072 struct ulist *roots;
4073 struct ulist_node *node;
4074 struct btrfs_root *search_root;
4075 struct btrfs_path *path;
4076 struct ulist_iterator iter;
4077 struct btrfs_key root_key, key;
4080 if (status != BTRFS_TREE_BLOCK_BAD_KEY_ORDER &&
4081 status != BTRFS_TREE_BLOCK_INVALID_OFFSETS)
4084 path = btrfs_alloc_path();
4088 ret = btrfs_find_all_roots(NULL, root->fs_info, buf->start,
4091 btrfs_free_path(path);
4095 ULIST_ITER_INIT(&iter);
4096 while ((node = ulist_next(roots, &iter))) {
4097 root_key.objectid = node->val;
4098 root_key.type = BTRFS_ROOT_ITEM_KEY;
4099 root_key.offset = (u64)-1;
4101 search_root = btrfs_read_fs_root(root->fs_info, &root_key);
4108 trans = btrfs_start_transaction(search_root, 0);
4109 if (IS_ERR(trans)) {
4110 ret = PTR_ERR(trans);
4114 path->lowest_level = btrfs_header_level(buf);
4115 path->skip_check_block = 1;
4116 if (path->lowest_level)
4117 btrfs_node_key_to_cpu(buf, &key, 0);
4119 btrfs_item_key_to_cpu(buf, &key, 0);
4120 ret = btrfs_search_slot(trans, search_root, &key, path, 0, 1);
4123 btrfs_commit_transaction(trans, search_root);
4126 if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
4127 ret = fix_key_order(trans, search_root, path);
4128 else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
4129 ret = fix_item_offset(trans, search_root, path);
4131 btrfs_commit_transaction(trans, search_root);
4134 btrfs_release_path(path);
4135 btrfs_commit_transaction(trans, search_root);
4138 btrfs_free_path(path);
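/*
 * Validate one tree block against its extent record: run the leaf/node
 * sanity checks, attempt try_to_fix_bad_block() in repair mode, and mark
 * the record content/owner checked on success.
 */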
4142 static int check_block(struct btrfs_root *root,
4143 struct cache_tree *extent_cache,
4144 struct extent_buffer *buf, u64 flags)
4146 struct extent_record *rec;
4147 struct cache_extent *cache;
4148 struct btrfs_key key;
4149 enum btrfs_tree_block_status status;
4153 cache = lookup_cache_extent(extent_cache, buf->start, buf->len);
4156 rec = container_of(cache, struct extent_record, cache);
4157 rec->generation = btrfs_header_generation(buf);
4159 level = btrfs_header_level(buf);
4160 if (btrfs_header_nritems(buf) > 0) {
4163 btrfs_item_key_to_cpu(buf, &key, 0);
4165 btrfs_node_key_to_cpu(buf, &key, 0);
4167 rec->info_objectid = key.objectid;
4169 rec->info_level = level;
4171 if (btrfs_is_leaf(buf))
4172 status = btrfs_check_leaf(root, &rec->parent_key, buf);
4174 status = btrfs_check_node(root, &rec->parent_key, buf);
4176 if (status != BTRFS_TREE_BLOCK_CLEAN) {
4178 status = try_to_fix_bad_block(root, buf, status);
4179 if (status != BTRFS_TREE_BLOCK_CLEAN) {
4181 fprintf(stderr, "bad block %llu\n",
4182 (unsigned long long)buf->start);
4185 * Signal to callers we need to start the scan over
4186 * again since we'll have cow'ed blocks.
4191 rec->content_checked = 1;
4192 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4193 rec->owner_ref_checked = 1;
4195 ret = check_owner_ref(root, rec, buf);
4197 rec->owner_ref_checked = 1;
4201 maybe_free_extent_rec(extent_cache, rec);
4205 static struct tree_backref *find_tree_backref(struct extent_record *rec,
4206 u64 parent, u64 root)
4208 struct list_head *cur = rec->backrefs.next;
4209 struct extent_backref *node;
4210 struct tree_backref *back;
4212 while(cur != &rec->backrefs) {
4213 node = list_entry(cur, struct extent_backref, list);
4217 back = (struct tree_backref *)node;
4219 if (!node->full_backref)
4221 if (parent == back->parent)
4224 if (node->full_backref)
4226 if (back->root == root)
4233 static struct tree_backref *alloc_tree_backref(struct extent_record *rec,
4234 u64 parent, u64 root)
4236 struct tree_backref *ref = malloc(sizeof(*ref));
4237 memset(&ref->node, 0, sizeof(ref->node));
4239 ref->parent = parent;
4240 ref->node.full_backref = 1;
4243 ref->node.full_backref = 0;
4245 list_add_tail(&ref->node.list, &rec->backrefs);
4250 static struct data_backref *find_data_backref(struct extent_record *rec,
4251 u64 parent, u64 root,
4252 u64 owner, u64 offset,
4254 u64 disk_bytenr, u64 bytes)
4256 struct list_head *cur = rec->backrefs.next;
4257 struct extent_backref *node;
4258 struct data_backref *back;
4260 while(cur != &rec->backrefs) {
4261 node = list_entry(cur, struct extent_backref, list);
4265 back = (struct data_backref *)node;
4267 if (!node->full_backref)
4269 if (parent == back->parent)
4272 if (node->full_backref)
4274 if (back->root == root && back->owner == owner &&
4275 back->offset == offset) {
4276 if (found_ref && node->found_ref &&
4277 (back->bytes != bytes ||
4278 back->disk_bytenr != disk_bytenr))
4287 static struct data_backref *alloc_data_backref(struct extent_record *rec,
4288 u64 parent, u64 root,
4289 u64 owner, u64 offset,
4292 struct data_backref *ref = malloc(sizeof(*ref));
4293 memset(&ref->node, 0, sizeof(ref->node));
4294 ref->node.is_data = 1;
4297 ref->parent = parent;
4300 ref->node.full_backref = 1;
4304 ref->offset = offset;
4305 ref->node.full_backref = 0;
4307 ref->bytes = max_size;
4310 list_add_tail(&ref->node.list, &rec->backrefs);
4311 if (max_size > rec->max_size)
4312 rec->max_size = max_size;
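/*
 * Insert or update an extent record in the extent cache.  A second record
 * covering the same bytenr is chained on rec->dups so the duplicates can be
 * reported and resolved later.
 */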
4316 static int add_extent_rec(struct cache_tree *extent_cache,
4317 struct btrfs_key *parent_key, u64 parent_gen,
4318 u64 start, u64 nr, u64 extent_item_refs,
4319 int is_root, int inc_ref, int set_checked,
4320 int metadata, int extent_rec, u64 max_size)
4322 struct extent_record *rec;
4323 struct cache_extent *cache;
4327 cache = lookup_cache_extent(extent_cache, start, nr);
4329 rec = container_of(cache, struct extent_record, cache);
4333 rec->nr = max(nr, max_size);
4336 * We need to make sure to reset nr to whatever the extent
4337 * record says was the real size, this way we can compare it to the backrefs.
4341 if (start != rec->start || rec->found_rec) {
4342 struct extent_record *tmp;
4345 if (list_empty(&rec->list))
4346 list_add_tail(&rec->list,
4347 &duplicate_extents);
4350 * We have to do this song and dance in case we
4351 * find an extent record that falls inside of
4352 * our current extent record but does not have
4353 * the same objectid.
4355 tmp = malloc(sizeof(*tmp));
4359 tmp->max_size = max_size;
4362 tmp->metadata = metadata;
4363 tmp->extent_item_refs = extent_item_refs;
4364 INIT_LIST_HEAD(&tmp->list);
4365 list_add_tail(&tmp->list, &rec->dups);
4366 rec->num_duplicates++;
4373 if (extent_item_refs && !dup) {
4374 if (rec->extent_item_refs) {
4375 fprintf(stderr, "block %llu rec "
4376 "extent_item_refs %llu, passed %llu\n",
4377 (unsigned long long)start,
4378 (unsigned long long)
4379 rec->extent_item_refs,
4380 (unsigned long long)extent_item_refs);
4382 rec->extent_item_refs = extent_item_refs;
4387 rec->content_checked = 1;
4388 rec->owner_ref_checked = 1;
4392 btrfs_cpu_key_to_disk(&rec->parent_key, parent_key);
4394 rec->parent_generation = parent_gen;
4396 if (rec->max_size < max_size)
4397 rec->max_size = max_size;
4400 * A metadata extent can't cross the stripe_len boundary, otherwise
4401 * the kernel scrub won't be able to handle it.
4402 * As stripe_len is now fixed to BTRFS_STRIPE_LEN, just check against that.
4405 if (metadata && check_crossing_stripes(rec->start,
4407 rec->crossing_stripes = 1;
4408 maybe_free_extent_rec(extent_cache, rec);
4411 rec = malloc(sizeof(*rec));
4413 rec->max_size = max_size;
4414 rec->nr = max(nr, max_size);
4415 rec->found_rec = !!extent_rec;
4416 rec->content_checked = 0;
4417 rec->owner_ref_checked = 0;
4418 rec->num_duplicates = 0;
4419 rec->metadata = metadata;
4420 rec->flag_block_full_backref = -1;
4421 rec->bad_full_backref = 0;
4422 rec->crossing_stripes = 0;
4423 INIT_LIST_HEAD(&rec->backrefs);
4424 INIT_LIST_HEAD(&rec->dups);
4425 INIT_LIST_HEAD(&rec->list);
4437 if (extent_item_refs)
4438 rec->extent_item_refs = extent_item_refs;
4440 rec->extent_item_refs = 0;
4443 btrfs_cpu_key_to_disk(&rec->parent_key, parent_key);
4445 memset(&rec->parent_key, 0, sizeof(*parent_key));
4448 rec->parent_generation = parent_gen;
4450 rec->parent_generation = 0;
4452 rec->cache.start = start;
4453 rec->cache.size = nr;
4454 ret = insert_cache_extent(extent_cache, &rec->cache);
4458 rec->content_checked = 1;
4459 rec->owner_ref_checked = 1;
4463 if (check_crossing_stripes(rec->start, rec->max_size))
4464 rec->crossing_stripes = 1;
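/*
 * Record a tree block backref (shared, keyed by parent, or normal, keyed by
 * root) for the extent at bytenr, creating a stub extent record first if
 * none exists yet.
 */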
4468 static int add_tree_backref(struct cache_tree *extent_cache, u64 bytenr,
4469 u64 parent, u64 root, int found_ref)
4471 struct extent_record *rec;
4472 struct tree_backref *back;
4473 struct cache_extent *cache;
4475 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4477 add_extent_rec(extent_cache, NULL, 0, bytenr,
4478 1, 0, 0, 0, 0, 1, 0, 0);
4479 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4484 rec = container_of(cache, struct extent_record, cache);
4485 if (rec->start != bytenr) {
4489 back = find_tree_backref(rec, parent, root);
4491 back = alloc_tree_backref(rec, parent, root);
4494 if (back->node.found_ref) {
4495 fprintf(stderr, "Extent back ref already exists "
4496 "for %llu parent %llu root %llu \n",
4497 (unsigned long long)bytenr,
4498 (unsigned long long)parent,
4499 (unsigned long long)root);
4501 back->node.found_ref = 1;
4503 if (back->node.found_extent_tree) {
4504 fprintf(stderr, "Extent back ref already exists "
4505 "for %llu parent %llu root %llu \n",
4506 (unsigned long long)bytenr,
4507 (unsigned long long)parent,
4508 (unsigned long long)root);
4510 back->node.found_extent_tree = 1;
4512 maybe_free_extent_rec(extent_cache, rec);
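/*
 * Record a data extent backref for the extent at bytenr.  found_ref
 * distinguishes refs found while walking the fs trees from refs found in
 * the extent tree items.
 */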
4516 static int add_data_backref(struct cache_tree *extent_cache, u64 bytenr,
4517 u64 parent, u64 root, u64 owner, u64 offset,
4518 u32 num_refs, int found_ref, u64 max_size)
4520 struct extent_record *rec;
4521 struct data_backref *back;
4522 struct cache_extent *cache;
4524 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4526 add_extent_rec(extent_cache, NULL, 0, bytenr, 1, 0, 0, 0, 0,
4528 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4533 rec = container_of(cache, struct extent_record, cache);
4534 if (rec->max_size < max_size)
4535 rec->max_size = max_size;
4538 * If found_ref is set then max_size is the real size and must match the
4539 * existing refs. So if we have already found a ref then we need to
4540 * make sure that this ref matches the existing one, otherwise we need
4541 * to add a new backref so we can notice that the backrefs don't match
4542 * and we need to figure out who is telling the truth. This is to
4543 * account for that awful fsync bug I introduced where we'd end up with
4544 * a btrfs_file_extent_item that would have its length include multiple
4545 * prealloc extents or point inside of a prealloc extent.
4547 back = find_data_backref(rec, parent, root, owner, offset, found_ref,
4550 back = alloc_data_backref(rec, parent, root, owner, offset,
4554 BUG_ON(num_refs != 1);
4555 if (back->node.found_ref)
4556 BUG_ON(back->bytes != max_size);
4557 back->node.found_ref = 1;
4558 back->found_ref += 1;
4559 back->bytes = max_size;
4560 back->disk_bytenr = bytenr;
4562 rec->content_checked = 1;
4563 rec->owner_ref_checked = 1;
4565 if (back->node.found_extent_tree) {
4566 fprintf(stderr, "Extent back ref already exists "
4567 "for %llu parent %llu root %llu "
4568 "owner %llu offset %llu num_refs %lu\n",
4569 (unsigned long long)bytenr,
4570 (unsigned long long)parent,
4571 (unsigned long long)root,
4572 (unsigned long long)owner,
4573 (unsigned long long)offset,
4574 (unsigned long)num_refs);
4576 back->num_refs = num_refs;
4577 back->node.found_extent_tree = 1;
4579 maybe_free_extent_rec(extent_cache, rec);
4583 static int add_pending(struct cache_tree *pending,
4584 struct cache_tree *seen, u64 bytenr, u32 size)
4587 ret = add_cache_extent(seen, bytenr, size);
4590 add_cache_extent(pending, bytenr, size);
4594 static int pick_next_pending(struct cache_tree *pending,
4595 struct cache_tree *reada,
4596 struct cache_tree *nodes,
4597 u64 last, struct block_info *bits, int bits_nr,
4600 unsigned long node_start = last;
4601 struct cache_extent *cache;
4604 cache = search_cache_extent(reada, 0);
4606 bits[0].start = cache->start;
4607 bits[0].size = cache->size;
4612 if (node_start > 32768)
4613 node_start -= 32768;
4615 cache = search_cache_extent(nodes, node_start);
4617 cache = search_cache_extent(nodes, 0);
4620 cache = search_cache_extent(pending, 0);
4625 bits[ret].start = cache->start;
4626 bits[ret].size = cache->size;
4627 cache = next_cache_extent(cache);
4629 } while (cache && ret < bits_nr);
4635 bits[ret].start = cache->start;
4636 bits[ret].size = cache->size;
4637 cache = next_cache_extent(cache);
4639 } while (cache && ret < bits_nr);
4641 if (bits_nr - ret > 8) {
4642 u64 lookup = bits[0].start + bits[0].size;
4643 struct cache_extent *next;
4644 next = search_cache_extent(pending, lookup);
4646 if (next->start - lookup > 32768)
4648 bits[ret].start = next->start;
4649 bits[ret].size = next->size;
4650 lookup = next->start + next->size;
4654 next = next_cache_extent(next);
4662 static void free_chunk_record(struct cache_extent *cache)
4664 struct chunk_record *rec;
4666 rec = container_of(cache, struct chunk_record, cache);
4667 list_del_init(&rec->list);
4668 list_del_init(&rec->dextents);
4672 void free_chunk_cache_tree(struct cache_tree *chunk_cache)
4674 cache_tree_free_extents(chunk_cache, free_chunk_record);
4677 static void free_device_record(struct rb_node *node)
4679 struct device_record *rec;
4681 rec = container_of(node, struct device_record, node);
4685 FREE_RB_BASED_TREE(device_cache, free_device_record);
4687 int insert_block_group_record(struct block_group_tree *tree,
4688 struct block_group_record *bg_rec)
4692 ret = insert_cache_extent(&tree->tree, &bg_rec->cache);
4696 list_add_tail(&bg_rec->list, &tree->block_groups);
4700 static void free_block_group_record(struct cache_extent *cache)
4702 struct block_group_record *rec;
4704 rec = container_of(cache, struct block_group_record, cache);
4705 list_del_init(&rec->list);
4709 void free_block_group_tree(struct block_group_tree *tree)
4711 cache_tree_free_extents(&tree->tree, free_block_group_record);
4714 int insert_device_extent_record(struct device_extent_tree *tree,
4715 struct device_extent_record *de_rec)
4720 * A device extent is a bit different from the other extents, because
4721 * extents which belong to different devices may have the
4722 * same start and size, so we need to use the special extent cache
4723 * search/insert functions.
4725 ret = insert_cache_extent2(&tree->tree, &de_rec->cache);
4729 list_add_tail(&de_rec->chunk_list, &tree->no_chunk_orphans);
4730 list_add_tail(&de_rec->device_list, &tree->no_device_orphans);
4734 static void free_device_extent_record(struct cache_extent *cache)
4736 struct device_extent_record *rec;
4738 rec = container_of(cache, struct device_extent_record, cache);
4739 if (!list_empty(&rec->chunk_list))
4740 list_del_init(&rec->chunk_list);
4741 if (!list_empty(&rec->device_list))
4742 list_del_init(&rec->device_list);
4746 void free_device_extent_tree(struct device_extent_tree *tree)
4748 cache_tree_free_extents(&tree->tree, free_device_extent_record);
4751 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4752 static int process_extent_ref_v0(struct cache_tree *extent_cache,
4753 struct extent_buffer *leaf, int slot)
4755 struct btrfs_extent_ref_v0 *ref0;
4756 struct btrfs_key key;
4758 btrfs_item_key_to_cpu(leaf, &key, slot);
4759 ref0 = btrfs_item_ptr(leaf, slot, struct btrfs_extent_ref_v0);
4760 if (btrfs_ref_objectid_v0(leaf, ref0) < BTRFS_FIRST_FREE_OBJECTID) {
4761 add_tree_backref(extent_cache, key.objectid, key.offset, 0, 0);
4763 add_data_backref(extent_cache, key.objectid, key.offset, 0,
4764 0, 0, btrfs_ref_count_v0(leaf, ref0), 0, 0);
4770 struct chunk_record *btrfs_new_chunk_record(struct extent_buffer *leaf,
4771 struct btrfs_key *key,
4774 struct btrfs_chunk *ptr;
4775 struct chunk_record *rec;
4778 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4779 num_stripes = btrfs_chunk_num_stripes(leaf, ptr);
4781 rec = malloc(btrfs_chunk_record_size(num_stripes));
4783 fprintf(stderr, "memory allocation failed\n");
4787 memset(rec, 0, btrfs_chunk_record_size(num_stripes));
4789 INIT_LIST_HEAD(&rec->list);
4790 INIT_LIST_HEAD(&rec->dextents);
4793 rec->cache.start = key->offset;
4794 rec->cache.size = btrfs_chunk_length(leaf, ptr);
4796 rec->generation = btrfs_header_generation(leaf);
4798 rec->objectid = key->objectid;
4799 rec->type = key->type;
4800 rec->offset = key->offset;
4802 rec->length = rec->cache.size;
4803 rec->owner = btrfs_chunk_owner(leaf, ptr);
4804 rec->stripe_len = btrfs_chunk_stripe_len(leaf, ptr);
4805 rec->type_flags = btrfs_chunk_type(leaf, ptr);
4806 rec->io_width = btrfs_chunk_io_width(leaf, ptr);
4807 rec->io_align = btrfs_chunk_io_align(leaf, ptr);
4808 rec->sector_size = btrfs_chunk_sector_size(leaf, ptr);
4809 rec->num_stripes = num_stripes;
4810 rec->sub_stripes = btrfs_chunk_sub_stripes(leaf, ptr);
4812 for (i = 0; i < rec->num_stripes; ++i) {
4813 rec->stripes[i].devid =
4814 btrfs_stripe_devid_nr(leaf, ptr, i);
4815 rec->stripes[i].offset =
4816 btrfs_stripe_offset_nr(leaf, ptr, i);
4817 read_extent_buffer(leaf, rec->stripes[i].dev_uuid,
4818 (unsigned long)btrfs_stripe_dev_uuid_nr(ptr, i),
4825 static int process_chunk_item(struct cache_tree *chunk_cache,
4826 struct btrfs_key *key, struct extent_buffer *eb,
4829 struct chunk_record *rec;
4832 rec = btrfs_new_chunk_record(eb, key, slot);
4833 ret = insert_cache_extent(chunk_cache, &rec->cache);
4835 fprintf(stderr, "Chunk[%llu, %llu] existed.\n",
4836 rec->offset, rec->length);
4843 static int process_device_item(struct rb_root *dev_cache,
4844 struct btrfs_key *key, struct extent_buffer *eb, int slot)
4846 struct btrfs_dev_item *ptr;
4847 struct device_record *rec;
4850 ptr = btrfs_item_ptr(eb,
4851 slot, struct btrfs_dev_item);
4853 rec = malloc(sizeof(*rec));
4855 fprintf(stderr, "memory allocation failed\n");
4859 rec->devid = key->offset;
4860 rec->generation = btrfs_header_generation(eb);
4862 rec->objectid = key->objectid;
4863 rec->type = key->type;
4864 rec->offset = key->offset;
4866 rec->devid = btrfs_device_id(eb, ptr);
4867 rec->total_byte = btrfs_device_total_bytes(eb, ptr);
4868 rec->byte_used = btrfs_device_bytes_used(eb, ptr);
4870 ret = rb_insert(dev_cache, &rec->node, device_record_compare);
4872 fprintf(stderr, "Device[%llu] existed.\n", rec->devid);
4879 struct block_group_record *
4880 btrfs_new_block_group_record(struct extent_buffer *leaf, struct btrfs_key *key,
4883 struct btrfs_block_group_item *ptr;
4884 struct block_group_record *rec;
4886 rec = malloc(sizeof(*rec));
4888 fprintf(stderr, "memory allocation failed\n");
4891 memset(rec, 0, sizeof(*rec));
4893 rec->cache.start = key->objectid;
4894 rec->cache.size = key->offset;
4896 rec->generation = btrfs_header_generation(leaf);
4898 rec->objectid = key->objectid;
4899 rec->type = key->type;
4900 rec->offset = key->offset;
4902 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_block_group_item);
4903 rec->flags = btrfs_disk_block_group_flags(leaf, ptr);
4905 INIT_LIST_HEAD(&rec->list);
4910 static int process_block_group_item(struct block_group_tree *block_group_cache,
4911 struct btrfs_key *key,
4912 struct extent_buffer *eb, int slot)
4914 struct block_group_record *rec;
4917 rec = btrfs_new_block_group_record(eb, key, slot);
4918 ret = insert_block_group_record(block_group_cache, rec);
4920 fprintf(stderr, "Block Group[%llu, %llu] existed.\n",
4921 rec->objectid, rec->offset);
4928 struct device_extent_record *
4929 btrfs_new_device_extent_record(struct extent_buffer *leaf,
4930 struct btrfs_key *key, int slot)
4932 struct device_extent_record *rec;
4933 struct btrfs_dev_extent *ptr;
4935 rec = malloc(sizeof(*rec));
4937 fprintf(stderr, "memory allocation failed\n");
4940 memset(rec, 0, sizeof(*rec));
4942 rec->cache.objectid = key->objectid;
4943 rec->cache.start = key->offset;
4945 rec->generation = btrfs_header_generation(leaf);
4947 rec->objectid = key->objectid;
4948 rec->type = key->type;
4949 rec->offset = key->offset;
4951 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
4952 rec->chunk_objecteid =
4953 btrfs_dev_extent_chunk_objectid(leaf, ptr);
4955 btrfs_dev_extent_chunk_offset(leaf, ptr);
4956 rec->length = btrfs_dev_extent_length(leaf, ptr);
4957 rec->cache.size = rec->length;
4959 INIT_LIST_HEAD(&rec->chunk_list);
4960 INIT_LIST_HEAD(&rec->device_list);
4966 process_device_extent_item(struct device_extent_tree *dev_extent_cache,
4967 struct btrfs_key *key, struct extent_buffer *eb,
4970 struct device_extent_record *rec;
4973 rec = btrfs_new_device_extent_record(eb, key, slot);
4974 ret = insert_device_extent_record(dev_extent_cache, rec);
4977 "Device extent[%llu, %llu, %llu] existed.\n",
4978 rec->objectid, rec->offset, rec->length);
4985 static int process_extent_item(struct btrfs_root *root,
4986 struct cache_tree *extent_cache,
4987 struct extent_buffer *eb, int slot)
4989 struct btrfs_extent_item *ei;
4990 struct btrfs_extent_inline_ref *iref;
4991 struct btrfs_extent_data_ref *dref;
4992 struct btrfs_shared_data_ref *sref;
4993 struct btrfs_key key;
4997 u32 item_size = btrfs_item_size_nr(eb, slot);
5003 btrfs_item_key_to_cpu(eb, &key, slot);
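/*
 * Skinny METADATA_ITEM keys store the tree level in the offset, so the
 * extent covers exactly one tree block (leafsize); regular EXTENT_ITEM
 * keys store the extent length in the offset.
 */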
5005 if (key.type == BTRFS_METADATA_ITEM_KEY) {
5007 num_bytes = root->leafsize;
5009 num_bytes = key.offset;
5012 if (item_size < sizeof(*ei)) {
5013 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5014 struct btrfs_extent_item_v0 *ei0;
5015 BUG_ON(item_size != sizeof(*ei0));
5016 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
5017 refs = btrfs_extent_refs_v0(eb, ei0);
5021 return add_extent_rec(extent_cache, NULL, 0, key.objectid,
5022 num_bytes, refs, 0, 0, 0, metadata, 1,
5026 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
5027 refs = btrfs_extent_refs(eb, ei);
5029 add_extent_rec(extent_cache, NULL, 0, key.objectid, num_bytes,
5030 refs, 0, 0, 0, metadata, 1, num_bytes);
5032 ptr = (unsigned long)(ei + 1);
5033 if (btrfs_extent_flags(eb, ei) & BTRFS_EXTENT_FLAG_TREE_BLOCK &&
5034 key.type == BTRFS_EXTENT_ITEM_KEY)
5035 ptr += sizeof(struct btrfs_tree_block_info);
5037 end = (unsigned long)ei + item_size;
5039 iref = (struct btrfs_extent_inline_ref *)ptr;
5040 type = btrfs_extent_inline_ref_type(eb, iref);
5041 offset = btrfs_extent_inline_ref_offset(eb, iref);
5043 case BTRFS_TREE_BLOCK_REF_KEY:
5044 add_tree_backref(extent_cache, key.objectid,
5047 case BTRFS_SHARED_BLOCK_REF_KEY:
5048 add_tree_backref(extent_cache, key.objectid,
5051 case BTRFS_EXTENT_DATA_REF_KEY:
5052 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
5053 add_data_backref(extent_cache, key.objectid, 0,
5054 btrfs_extent_data_ref_root(eb, dref),
5055 btrfs_extent_data_ref_objectid(eb,
5057 btrfs_extent_data_ref_offset(eb, dref),
5058 btrfs_extent_data_ref_count(eb, dref),
5061 case BTRFS_SHARED_DATA_REF_KEY:
5062 sref = (struct btrfs_shared_data_ref *)(iref + 1);
5063 add_data_backref(extent_cache, key.objectid, offset,
5065 btrfs_shared_data_ref_count(eb, sref),
5069 fprintf(stderr, "corrupt extent record: key %Lu %u %Lu\n",
5070 key.objectid, key.type, num_bytes);
5073 ptr += btrfs_extent_inline_ref_size(type);
5080 static int check_cache_range(struct btrfs_root *root,
5081 struct btrfs_block_group_cache *cache,
5082 u64 offset, u64 bytes)
5084 struct btrfs_free_space *entry;
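/*
 * The superblock mirrors are never part of the free space cache, so first
 * carve any mirror that maps into this range out of it; only then look up
 * the remainder in the free space ctl below.
 */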
5090 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
5091 bytenr = btrfs_sb_offset(i);
5092 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
5093 cache->key.objectid, bytenr, 0,
5094 &logical, &nr, &stripe_len);
5099 if (logical[nr] + stripe_len <= offset)
5101 if (offset + bytes <= logical[nr])
5103 if (logical[nr] == offset) {
5104 if (stripe_len >= bytes) {
5108 bytes -= stripe_len;
5109 offset += stripe_len;
5110 } else if (logical[nr] < offset) {
5111 if (logical[nr] + stripe_len >=
5116 bytes = (offset + bytes) -
5117 (logical[nr] + stripe_len);
5118 offset = logical[nr] + stripe_len;
5121 * Could be tricky, the super may land in the
5122 * middle of the area we're checking. First
5123 * check the easiest case, it's at the end.
5125 if (logical[nr] + stripe_len >=
5127 bytes = logical[nr] - offset;
5131 /* Check the left side */
5132 ret = check_cache_range(root, cache,
5134 logical[nr] - offset);
5140 /* Now we continue with the right side */
5141 bytes = (offset + bytes) -
5142 (logical[nr] + stripe_len);
5143 offset = logical[nr] + stripe_len;
5150 entry = btrfs_find_free_space(cache->free_space_ctl, offset, bytes);
5152 fprintf(stderr, "There is no free space entry for %Lu-%Lu\n",
5153 offset, offset+bytes);
5157 if (entry->offset != offset) {
5158 fprintf(stderr, "Wanted offset %Lu, found %Lu\n", offset,
5163 if (entry->bytes != bytes) {
5164 fprintf(stderr, "Wanted bytes %Lu, found %Lu for off %Lu\n",
5165 bytes, entry->bytes, offset);
5169 unlink_free_space(cache->free_space_ctl, entry);
5174 static int verify_space_cache(struct btrfs_root *root,
5175 struct btrfs_block_group_cache *cache)
5177 struct btrfs_path *path;
5178 struct extent_buffer *leaf;
5179 struct btrfs_key key;
5183 path = btrfs_alloc_path();
5187 root = root->fs_info->extent_root;
5189 last = max_t(u64, cache->key.objectid, BTRFS_SUPER_INFO_OFFSET);
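/*
 * Walk every extent item that falls inside this block group; each gap
 * between allocated extents must be backed by a free space entry, which
 * check_cache_range() verifies and unlinks from the ctl as it goes.
 */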
5191 key.objectid = last;
5193 key.type = BTRFS_EXTENT_ITEM_KEY;
5195 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5200 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5201 ret = btrfs_next_leaf(root, path);
5209 leaf = path->nodes[0];
5210 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5211 if (key.objectid >= cache->key.offset + cache->key.objectid)
5213 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
5214 key.type != BTRFS_METADATA_ITEM_KEY) {
5219 if (last == key.objectid) {
5220 if (key.type == BTRFS_EXTENT_ITEM_KEY)
5221 last = key.objectid + key.offset;
5223 last = key.objectid + root->leafsize;
5228 ret = check_cache_range(root, cache, last,
5229 key.objectid - last);
5232 if (key.type == BTRFS_EXTENT_ITEM_KEY)
5233 last = key.objectid + key.offset;
5235 last = key.objectid + root->leafsize;
5239 if (last < cache->key.objectid + cache->key.offset)
5240 ret = check_cache_range(root, cache, last,
5241 cache->key.objectid +
5242 cache->key.offset - last);
5245 btrfs_free_path(path);
5248 !RB_EMPTY_ROOT(&cache->free_space_ctl->free_space_offset)) {
5249 fprintf(stderr, "There are still entries left in the space "
5257 static int check_space_cache(struct btrfs_root *root)
5259 struct btrfs_block_group_cache *cache;
5260 u64 start = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
5264 if (btrfs_super_cache_generation(root->fs_info->super_copy) != -1ULL &&
5265 btrfs_super_generation(root->fs_info->super_copy) !=
5266 btrfs_super_cache_generation(root->fs_info->super_copy)) {
5267 printf("cache and super generation don't match, space cache "
5268 "will be invalidated\n");
5273 cache = btrfs_lookup_first_block_group(root->fs_info, start);
5277 start = cache->key.objectid + cache->key.offset;
5278 if (!cache->free_space_ctl) {
5279 if (btrfs_init_free_space_ctl(cache,
5280 root->sectorsize)) {
5285 btrfs_remove_free_space_cache(cache);
5288 ret = load_free_space_cache(root->fs_info, cache);
5292 ret = verify_space_cache(root, cache);
5294 fprintf(stderr, "cache appears valid but isnt %Lu\n",
5295 cache->key.objectid);
5300 return error ? -EINVAL : 0;
5303 static int check_extent_csums(struct btrfs_root *root, u64 bytenr,
5304 u64 num_bytes, unsigned long leaf_offset,
5305 struct extent_buffer *eb)
{
5308 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
5310 unsigned long csum_offset;
5314 u64 data_checked = 0;
5320 if (num_bytes % root->sectorsize)
5323 data = malloc(num_bytes);
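/*
 * Read the extent in large chunks and verify each sectorsize block against
 * the checksum stored in the csum item; on a mismatch, retry the same
 * range from the next mirror if one exists.
 */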
5327 while (offset < num_bytes) {
5330 read_len = num_bytes - offset;
5331 /* read as much data as we can in one go */
5332 ret = read_extent_data(root, data + offset,
5333 bytenr + offset, &read_len, mirror);
5337 /* verify the checksum of every sectorsize block of data */
5338 while (data_checked < read_len) {
5340 tmp = offset + data_checked;
5342 csum = btrfs_csum_data(NULL, (char *)data + tmp,
5343 csum, root->sectorsize);
5344 btrfs_csum_final(csum, (char *)&csum);
5346 csum_offset = leaf_offset +
5347 tmp / root->sectorsize * csum_size;
5348 read_extent_buffer(eb, (char *)&csum_expected,
5349 csum_offset, csum_size);
5350 /* try another mirror */
5351 if (csum != csum_expected) {
5352 fprintf(stderr, "mirror %d bytenr %llu csum %u expected csum %u\n",
5353 mirror, bytenr + tmp,
5354 csum, csum_expected);
5355 num_copies = btrfs_num_copies(
5356 &root->fs_info->mapping_tree,
5358 if (mirror < num_copies - 1) {
5363 data_checked += root->sectorsize;
5372 static int check_extent_exists(struct btrfs_root *root, u64 bytenr,
5375 struct btrfs_path *path;
5376 struct extent_buffer *leaf;
5377 struct btrfs_key key;
5380 path = btrfs_alloc_path();
5382 fprintf(stderr, "Error allocing path\n");
5386 key.objectid = bytenr;
5387 key.type = BTRFS_EXTENT_ITEM_KEY;
5388 key.offset = (u64)-1;
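/*
 * Find the last extent item at or below bytenr, then walk forward and trim
 * the range covered by each extent we find; whatever is left of the range
 * at the end has csums but no extent record backing it.
 */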
5391 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
5394 fprintf(stderr, "Error looking up extent record %d\n", ret);
5395 btrfs_free_path(path);
5398 if (path->slots[0] > 0) {
5401 ret = btrfs_prev_leaf(root, path);
5404 } else if (ret > 0) {
5411 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5414 * Block group items come before extent items if they have the same
5415 * bytenr, so walk back one more just in case. Dear future traveler,
5416 * first congrats on mastering time travel. Now if it's not too much
5417 * trouble could you go back to 2006 and tell Chris to make the
5418 * BLOCK_GROUP_ITEM_KEY (and BTRFS_*_REF_KEY) lower than the
5419 * EXTENT_ITEM_KEY please?
5421 while (key.type > BTRFS_EXTENT_ITEM_KEY) {
5422 if (path->slots[0] > 0) {
5425 ret = btrfs_prev_leaf(root, path);
5428 } else if (ret > 0) {
5433 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5437 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5438 ret = btrfs_next_leaf(root, path);
5440 fprintf(stderr, "Error going to next leaf "
5442 btrfs_free_path(path);
5448 leaf = path->nodes[0];
5449 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5450 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
5454 if (key.objectid + key.offset < bytenr) {
5458 if (key.objectid > bytenr + num_bytes)
5461 if (key.objectid == bytenr) {
5462 if (key.offset >= num_bytes) {
5466 num_bytes -= key.offset;
5467 bytenr += key.offset;
5468 } else if (key.objectid < bytenr) {
5469 if (key.objectid + key.offset >= bytenr + num_bytes) {
5473 num_bytes = (bytenr + num_bytes) -
5474 (key.objectid + key.offset);
5475 bytenr = key.objectid + key.offset;
5477 if (key.objectid + key.offset < bytenr + num_bytes) {
5478 u64 new_start = key.objectid + key.offset;
5479 u64 new_bytes = bytenr + num_bytes - new_start;
5482 * Weird case, the extent is in the middle of
5483 * our range, we'll have to search one side
5484 * and then the other. Not sure if this happens
5485 * in real life, but no harm in coding it up
5486 * anyway just in case.
5488 btrfs_release_path(path);
5489 ret = check_extent_exists(root, new_start,
5492 fprintf(stderr, "Right section didn't "
5496 num_bytes = key.objectid - bytenr;
5499 num_bytes = key.objectid - bytenr;
5506 if (num_bytes && !ret) {
5507 fprintf(stderr, "There are no extents for csum range "
5508 "%Lu-%Lu\n", bytenr, bytenr+num_bytes);
5512 btrfs_free_path(path);
5516 static int check_csums(struct btrfs_root *root)
5518 struct btrfs_path *path;
5519 struct extent_buffer *leaf;
5520 struct btrfs_key key;
5521 u64 offset = 0, num_bytes = 0;
5522 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
5526 unsigned long leaf_offset;
5528 root = root->fs_info->csum_root;
5529 if (!extent_buffer_uptodate(root->node)) {
5530 fprintf(stderr, "No valid csum tree found\n");
5534 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
5535 key.type = BTRFS_EXTENT_CSUM_KEY;
5538 path = btrfs_alloc_path();
5542 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5544 fprintf(stderr, "Error searching csum tree %d\n", ret);
5545 btrfs_free_path(path);
5549 if (ret > 0 && path->slots[0])
5554 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5555 ret = btrfs_next_leaf(root, path);
5557 fprintf(stderr, "Error going to next leaf "
5564 leaf = path->nodes[0];
5566 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5567 if (key.type != BTRFS_EXTENT_CSUM_KEY) {
5572 data_len = (btrfs_item_size_nr(leaf, path->slots[0]) /
5573 csum_size) * root->sectorsize;
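/*
 * A csum item holds one checksum per sectorsize block, so the item size
 * used above tells us how many bytes of data this item covers.
 */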
5574 if (!check_data_csum)
5575 goto skip_csum_check;
5576 leaf_offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
5577 ret = check_extent_csums(root, key.offset, data_len,
5583 offset = key.offset;
5584 } else if (key.offset != offset + num_bytes) {
5585 ret = check_extent_exists(root, offset, num_bytes);
5587 fprintf(stderr, "Csum exists for %Lu-%Lu but "
5588 "there is no extent record\n",
5589 offset, offset+num_bytes);
5592 offset = key.offset;
5595 num_bytes += data_len;
5599 btrfs_free_path(path);
5603 static int is_dropped_key(struct btrfs_key *key,
5604 struct btrfs_key *drop_key)
{
5605 if (key->objectid < drop_key->objectid)
5607 else if (key->objectid == drop_key->objectid) {
5608 if (key->type < drop_key->type)
5610 else if (key->type == drop_key->type) {
5611 if (key->offset < drop_key->offset)
5619 * Here are the rules for FULL_BACKREF.
5621 * 1) If BTRFS_HEADER_FLAG_RELOC is set then we have FULL_BACKREF set.
5622 * 2) If btrfs_header_owner(buf) no longer points to buf then we have
5624 * 3) We cow'ed the block while walking down a reloc tree. It is impossible to tell
5625 * whether this happened after the relocation occurred, since we'll have dropped the
5626 * reloc root, so it's entirely possible to have FULL_BACKREF set on buf and
5627 * have no real way to know for sure.
5629 * We process the blocks one root at a time, and we start from the lowest root
5630 * objectid and go to the highest. So we can just lookup the owner backref for
5631 * the record and if we don't find it then we know it doesn't exist and we have
5634 * FIXME: if we ever start reclaiming root objectid's then we need to fix this
5635 * assumption and simply indicate that we _think_ that the FULL BACKREF needs to
5636 * be set or not and then we can check later once we've gathered all the refs.
5638 static int calc_extent_flag(struct btrfs_root *root,
5639 struct cache_tree *extent_cache,
5640 struct extent_buffer *buf,
5641 struct root_item_record *ri,
5644 struct extent_record *rec;
5645 struct cache_extent *cache;
5646 struct tree_backref *tback;
5649 cache = lookup_cache_extent(extent_cache, buf->start, 1);
5650 /* we have added this extent before */
5652 rec = container_of(cache, struct extent_record, cache);
5655 * Except for the file/reloc trees, we cannot have
5658 if (ri->objectid < BTRFS_FIRST_FREE_OBJECTID)
5663 if (buf->start == ri->bytenr)
5666 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
5669 owner = btrfs_header_owner(buf);
5670 if (owner == ri->objectid)
5673 tback = find_tree_backref(rec, 0, owner);
5678 if (rec->flag_block_full_backref != -1 &&
5679 rec->flag_block_full_backref != 0)
5680 rec->bad_full_backref = 1;
5683 *flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5684 if (rec->flag_block_full_backref != -1 &&
5685 rec->flag_block_full_backref != 1)
5686 rec->bad_full_backref = 1;
5690 static int run_next_block(struct btrfs_root *root,
5691 struct block_info *bits,
5694 struct cache_tree *pending,
5695 struct cache_tree *seen,
5696 struct cache_tree *reada,
5697 struct cache_tree *nodes,
5698 struct cache_tree *extent_cache,
5699 struct cache_tree *chunk_cache,
5700 struct rb_root *dev_cache,
5701 struct block_group_tree *block_group_cache,
5702 struct device_extent_tree *dev_extent_cache,
5703 struct root_item_record *ri)
5705 struct extent_buffer *buf;
5706 struct extent_record *rec = NULL;
5717 struct btrfs_key key;
5718 struct cache_extent *cache;
5721 nritems = pick_next_pending(pending, reada, nodes, *last, bits,
5722 bits_nr, &reada_bits);
5727 for(i = 0; i < nritems; i++) {
5728 ret = add_cache_extent(reada, bits[i].start,
5733 /* fixme, get the parent transid */
5734 readahead_tree_block(root, bits[i].start,
5738 *last = bits[0].start;
5739 bytenr = bits[0].start;
5740 size = bits[0].size;
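/*
 * We are processing this block now, so drop it from whichever of the
 * pending, reada and nodes trees it is still queued on.
 */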
5742 cache = lookup_cache_extent(pending, bytenr, size);
5744 remove_cache_extent(pending, cache);
5747 cache = lookup_cache_extent(reada, bytenr, size);
5749 remove_cache_extent(reada, cache);
5752 cache = lookup_cache_extent(nodes, bytenr, size);
5754 remove_cache_extent(nodes, cache);
5757 cache = lookup_cache_extent(extent_cache, bytenr, size);
5759 rec = container_of(cache, struct extent_record, cache);
5760 gen = rec->parent_generation;
5763 /* fixme, get the real parent transid */
5764 buf = read_tree_block(root, bytenr, size, gen);
5765 if (!extent_buffer_uptodate(buf)) {
5766 record_bad_block_io(root->fs_info,
5767 extent_cache, bytenr, size);
5771 nritems = btrfs_header_nritems(buf);
5774 if (!init_extent_tree) {
5775 ret = btrfs_lookup_extent_info(NULL, root, bytenr,
5776 btrfs_header_level(buf), 1, NULL,
5779 ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
5781 fprintf(stderr, "Couldn't calc extent flags\n");
5782 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5787 ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
5789 fprintf(stderr, "Couldn't calc extent flags\n");
5790 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5794 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5796 ri->objectid != BTRFS_TREE_RELOC_OBJECTID &&
5797 ri->objectid == btrfs_header_owner(buf)) {
5799 * Ok we got to this block from its original owner and
5800 * we have FULL_BACKREF set. Relocation can leave
5801 * converted blocks over so this is altogether possible,
5802 * however it's not possible if the generation > the
5803 * last snapshot, so check for this case.
5805 if (!btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC) &&
5806 btrfs_header_generation(buf) > ri->last_snapshot) {
5807 flags &= ~BTRFS_BLOCK_FLAG_FULL_BACKREF;
5808 rec->bad_full_backref = 1;
5813 (ri->objectid == BTRFS_TREE_RELOC_OBJECTID ||
5814 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
5815 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5816 rec->bad_full_backref = 1;
5820 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5821 rec->flag_block_full_backref = 1;
5825 rec->flag_block_full_backref = 0;
5827 owner = btrfs_header_owner(buf);
5830 ret = check_block(root, extent_cache, buf, flags);
5834 if (btrfs_is_leaf(buf)) {
5835 btree_space_waste += btrfs_leaf_free_space(root, buf);
5836 for (i = 0; i < nritems; i++) {
5837 struct btrfs_file_extent_item *fi;
5838 btrfs_item_key_to_cpu(buf, &key, i);
5839 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
5840 process_extent_item(root, extent_cache, buf,
5844 if (key.type == BTRFS_METADATA_ITEM_KEY) {
5845 process_extent_item(root, extent_cache, buf,
5849 if (key.type == BTRFS_EXTENT_CSUM_KEY) {
5851 btrfs_item_size_nr(buf, i);
5854 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5855 process_chunk_item(chunk_cache, &key, buf, i);
5858 if (key.type == BTRFS_DEV_ITEM_KEY) {
5859 process_device_item(dev_cache, &key, buf, i);
5862 if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5863 process_block_group_item(block_group_cache,
5867 if (key.type == BTRFS_DEV_EXTENT_KEY) {
5868 process_device_extent_item(dev_extent_cache,
5873 if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
5874 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5875 process_extent_ref_v0(extent_cache, buf, i);
5882 if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
5883 add_tree_backref(extent_cache, key.objectid, 0,
5887 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
5888 add_tree_backref(extent_cache, key.objectid,
5892 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
5893 struct btrfs_extent_data_ref *ref;
5894 ref = btrfs_item_ptr(buf, i,
5895 struct btrfs_extent_data_ref);
5896 add_data_backref(extent_cache,
5898 btrfs_extent_data_ref_root(buf, ref),
5899 btrfs_extent_data_ref_objectid(buf,
5901 btrfs_extent_data_ref_offset(buf, ref),
5902 btrfs_extent_data_ref_count(buf, ref),
5903 0, root->sectorsize);
5906 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
5907 struct btrfs_shared_data_ref *ref;
5908 ref = btrfs_item_ptr(buf, i,
5909 struct btrfs_shared_data_ref);
5910 add_data_backref(extent_cache,
5911 key.objectid, key.offset, 0, 0, 0,
5912 btrfs_shared_data_ref_count(buf, ref),
5913 0, root->sectorsize);
5916 if (key.type == BTRFS_ORPHAN_ITEM_KEY) {
5917 struct bad_item *bad;
5919 if (key.objectid == BTRFS_ORPHAN_OBJECTID)
5923 bad = malloc(sizeof(struct bad_item));
5926 INIT_LIST_HEAD(&bad->list);
5927 memcpy(&bad->key, &key,
5928 sizeof(struct btrfs_key));
5929 bad->root_id = owner;
5930 list_add_tail(&bad->list, &delete_items);
5933 if (key.type != BTRFS_EXTENT_DATA_KEY)
5935 fi = btrfs_item_ptr(buf, i,
5936 struct btrfs_file_extent_item);
5937 if (btrfs_file_extent_type(buf, fi) ==
5938 BTRFS_FILE_EXTENT_INLINE)
5940 if (btrfs_file_extent_disk_bytenr(buf, fi) == 0)
5943 data_bytes_allocated +=
5944 btrfs_file_extent_disk_num_bytes(buf, fi);
5945 if (data_bytes_allocated < root->sectorsize) {
5948 data_bytes_referenced +=
5949 btrfs_file_extent_num_bytes(buf, fi);
5950 add_data_backref(extent_cache,
5951 btrfs_file_extent_disk_bytenr(buf, fi),
5952 parent, owner, key.objectid, key.offset -
5953 btrfs_file_extent_offset(buf, fi), 1, 1,
5954 btrfs_file_extent_disk_num_bytes(buf, fi));
5958 struct btrfs_key first_key;
5960 first_key.objectid = 0;
5963 btrfs_item_key_to_cpu(buf, &first_key, 0);
5964 level = btrfs_header_level(buf);
5965 for (i = 0; i < nritems; i++) {
5966 ptr = btrfs_node_blockptr(buf, i);
5967 size = btrfs_level_size(root, level - 1);
5968 btrfs_node_key_to_cpu(buf, &key, i);
5970 if ((level == ri->drop_level)
5971 && is_dropped_key(&key, &ri->drop_key)) {
5975 ret = add_extent_rec(extent_cache, &key,
5976 btrfs_node_ptr_generation(buf, i),
5977 ptr, size, 0, 0, 1, 0, 1, 0,
5981 add_tree_backref(extent_cache, ptr, parent, owner, 1);
5984 add_pending(nodes, seen, ptr, size);
5986 add_pending(pending, seen, ptr, size);
5989 btree_space_waste += (BTRFS_NODEPTRS_PER_BLOCK(root) -
5990 nritems) * sizeof(struct btrfs_key_ptr);
5992 total_btree_bytes += buf->len;
5993 if (fs_root_objectid(btrfs_header_owner(buf)))
5994 total_fs_tree_bytes += buf->len;
5995 if (btrfs_header_owner(buf) == BTRFS_EXTENT_TREE_OBJECTID)
5996 total_extent_tree_bytes += buf->len;
5997 if (!found_old_backref &&
5998 btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID &&
5999 btrfs_header_backref_rev(buf) == BTRFS_MIXED_BACKREF_REV &&
6000 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
6001 found_old_backref = 1;
6003 free_extent_buffer(buf);
6007 static int add_root_to_pending(struct extent_buffer *buf,
6008 struct cache_tree *extent_cache,
6009 struct cache_tree *pending,
6010 struct cache_tree *seen,
6011 struct cache_tree *nodes,
6014 if (btrfs_header_level(buf) > 0)
6015 add_pending(nodes, seen, buf->start, buf->len);
6017 add_pending(pending, seen, buf->start, buf->len);
6018 add_extent_rec(extent_cache, NULL, 0, buf->start, buf->len,
6019 0, 1, 1, 0, 1, 0, buf->len);
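/*
 * Reloc trees and pre-mixed-backref filesystems reference their root block
 * with a shared backref keyed on the parent bytenr (the block itself here);
 * everything else gets a keyed backref on the owning root objectid.
 */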
6021 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
6022 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
6023 add_tree_backref(extent_cache, buf->start, buf->start,
6026 add_tree_backref(extent_cache, buf->start, 0, objectid, 1);
6030 /* as we fix the tree, we might be deleting blocks that
6031 * we're tracking for repair. This hook makes sure we
6032 * remove any backrefs for blocks as we are fixing them.
6034 static int free_extent_hook(struct btrfs_trans_handle *trans,
6035 struct btrfs_root *root,
6036 u64 bytenr, u64 num_bytes, u64 parent,
6037 u64 root_objectid, u64 owner, u64 offset,
6040 struct extent_record *rec;
6041 struct cache_extent *cache;
6043 struct cache_tree *extent_cache = root->fs_info->fsck_extent_cache;
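/*
 * Data backrefs carry an inode number as the owner while tree backrefs
 * carry the block level, so the owner range tells the two kinds apart.
 */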
6045 is_data = owner >= BTRFS_FIRST_FREE_OBJECTID;
6046 cache = lookup_cache_extent(extent_cache, bytenr, num_bytes);
6050 rec = container_of(cache, struct extent_record, cache);
6052 struct data_backref *back;
6053 back = find_data_backref(rec, parent, root_objectid, owner,
6054 offset, 1, bytenr, num_bytes);
6057 if (back->node.found_ref) {
6058 back->found_ref -= refs_to_drop;
6060 rec->refs -= refs_to_drop;
6062 if (back->node.found_extent_tree) {
6063 back->num_refs -= refs_to_drop;
6064 if (rec->extent_item_refs)
6065 rec->extent_item_refs -= refs_to_drop;
6067 if (back->found_ref == 0)
6068 back->node.found_ref = 0;
6069 if (back->num_refs == 0)
6070 back->node.found_extent_tree = 0;
6072 if (!back->node.found_extent_tree && back->node.found_ref) {
6073 list_del(&back->node.list);
6077 struct tree_backref *back;
6078 back = find_tree_backref(rec, parent, root_objectid);
6081 if (back->node.found_ref) {
6084 back->node.found_ref = 0;
6086 if (back->node.found_extent_tree) {
6087 if (rec->extent_item_refs)
6088 rec->extent_item_refs--;
6089 back->node.found_extent_tree = 0;
6091 if (!back->node.found_extent_tree && back->node.found_ref) {
6092 list_del(&back->node.list);
6096 maybe_free_extent_rec(extent_cache, rec);
6101 static int delete_extent_records(struct btrfs_trans_handle *trans,
6102 struct btrfs_root *root,
6103 struct btrfs_path *path,
6104 u64 bytenr, u64 new_len)
6106 struct btrfs_key key;
6107 struct btrfs_key found_key;
6108 struct extent_buffer *leaf;
6113 key.objectid = bytenr;
6115 key.offset = (u64)-1;
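/*
 * Start from the highest possible key for this bytenr and walk backwards,
 * deleting every extent item and backref item that still refers to it.
 */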
6118 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
6125 if (path->slots[0] == 0)
6131 leaf = path->nodes[0];
6132 slot = path->slots[0];
6134 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6135 if (found_key.objectid != bytenr)
6138 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
6139 found_key.type != BTRFS_METADATA_ITEM_KEY &&
6140 found_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
6141 found_key.type != BTRFS_EXTENT_DATA_REF_KEY &&
6142 found_key.type != BTRFS_EXTENT_REF_V0_KEY &&
6143 found_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
6144 found_key.type != BTRFS_SHARED_DATA_REF_KEY) {
6145 btrfs_release_path(path);
6146 if (found_key.type == 0) {
6147 if (found_key.offset == 0)
6149 key.offset = found_key.offset - 1;
6150 key.type = found_key.type;
6152 key.type = found_key.type - 1;
6153 key.offset = (u64)-1;
6157 fprintf(stderr, "repair deleting extent record: key %Lu %u %Lu\n",
6158 found_key.objectid, found_key.type, found_key.offset);
6160 ret = btrfs_del_item(trans, root->fs_info->extent_root, path);
6163 btrfs_release_path(path);
6165 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
6166 found_key.type == BTRFS_METADATA_ITEM_KEY) {
6167 u64 bytes = (found_key.type == BTRFS_EXTENT_ITEM_KEY) ?
6168 found_key.offset : root->leafsize;
6170 ret = btrfs_update_block_group(trans, root, bytenr,
6177 btrfs_release_path(path);
6182 * for a single backref, this will allocate a new extent
6183 * and add the backref to it.
6185 static int record_extent(struct btrfs_trans_handle *trans,
6186 struct btrfs_fs_info *info,
6187 struct btrfs_path *path,
6188 struct extent_record *rec,
6189 struct extent_backref *back,
6190 int allocated, u64 flags)
6193 struct btrfs_root *extent_root = info->extent_root;
6194 struct extent_buffer *leaf;
6195 struct btrfs_key ins_key;
6196 struct btrfs_extent_item *ei;
6197 struct tree_backref *tback;
6198 struct data_backref *dback;
6199 struct btrfs_tree_block_info *bi;
6202 rec->max_size = max_t(u64, rec->max_size,
6203 info->extent_root->leafsize);
6206 u32 item_size = sizeof(*ei);
6209 item_size += sizeof(*bi);
6211 ins_key.objectid = rec->start;
6212 ins_key.offset = rec->max_size;
6213 ins_key.type = BTRFS_EXTENT_ITEM_KEY;
6215 ret = btrfs_insert_empty_item(trans, extent_root, path,
6216 &ins_key, item_size);
6220 leaf = path->nodes[0];
6221 ei = btrfs_item_ptr(leaf, path->slots[0],
6222 struct btrfs_extent_item);
6224 btrfs_set_extent_refs(leaf, ei, 0);
6225 btrfs_set_extent_generation(leaf, ei, rec->generation);
6227 if (back->is_data) {
6228 btrfs_set_extent_flags(leaf, ei,
6229 BTRFS_EXTENT_FLAG_DATA);
6231 struct btrfs_disk_key copy_key;
6233 tback = (struct tree_backref *)back;
6234 bi = (struct btrfs_tree_block_info *)(ei + 1);
6235 memset_extent_buffer(leaf, 0, (unsigned long)bi,
6238 btrfs_set_disk_key_objectid(&copy_key,
6239 rec->info_objectid);
6240 btrfs_set_disk_key_type(&copy_key, 0);
6241 btrfs_set_disk_key_offset(&copy_key, 0);
6243 btrfs_set_tree_block_level(leaf, bi, rec->info_level);
6244 btrfs_set_tree_block_key(leaf, bi, &copy_key);
6246 btrfs_set_extent_flags(leaf, ei,
6247 BTRFS_EXTENT_FLAG_TREE_BLOCK | flags);
6250 btrfs_mark_buffer_dirty(leaf);
6251 ret = btrfs_update_block_group(trans, extent_root, rec->start,
6252 rec->max_size, 1, 0);
6255 btrfs_release_path(path);
6258 if (back->is_data) {
6262 dback = (struct data_backref *)back;
6263 if (back->full_backref)
6264 parent = dback->parent;
6268 for (i = 0; i < dback->found_ref; i++) {
6269 /* if parent != 0, we're doing a full backref
6270 * passing BTRFS_FIRST_FREE_OBJECTID as the owner
6271 * just makes the backref allocator create a data
6274 ret = btrfs_inc_extent_ref(trans, info->extent_root,
6275 rec->start, rec->max_size,
6279 BTRFS_FIRST_FREE_OBJECTID :
6285 fprintf(stderr, "adding new data backref"
6286 " on %llu %s %llu owner %llu"
6287 " offset %llu found %d\n",
6288 (unsigned long long)rec->start,
6289 back->full_backref ?
6291 back->full_backref ?
6292 (unsigned long long)parent :
6293 (unsigned long long)dback->root,
6294 (unsigned long long)dback->owner,
6295 (unsigned long long)dback->offset,
6300 tback = (struct tree_backref *)back;
6301 if (back->full_backref)
6302 parent = tback->parent;
6306 ret = btrfs_inc_extent_ref(trans, info->extent_root,
6307 rec->start, rec->max_size,
6308 parent, tback->root, 0, 0);
6309 fprintf(stderr, "adding new tree backref on "
6310 "start %llu len %llu parent %llu root %llu\n",
6311 rec->start, rec->max_size, parent, tback->root);
6316 btrfs_release_path(path);
6320 struct extent_entry {
6325 struct list_head list;
6328 static struct extent_entry *find_entry(struct list_head *entries,
6329 u64 bytenr, u64 bytes)
6331 struct extent_entry *entry = NULL;
6333 list_for_each_entry(entry, entries, list) {
6334 if (entry->bytenr == bytenr && entry->bytes == bytes)
6341 static struct extent_entry *find_most_right_entry(struct list_head *entries)
6343 struct extent_entry *entry, *best = NULL, *prev = NULL;
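/*
 * Pick the entry that the most backrefs agree on; if two different entries
 * tie for the highest count we cannot decide, so NULL is returned.
 */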
6345 list_for_each_entry(entry, entries, list) {
6352 * If there are as many broken entries as entries then we know
6353 * not to trust this particular entry.
6355 if (entry->broken == entry->count)
6359 * If our current entry == best then we can't be sure our best
6360 * is really the best, so we need to keep searching.
6362 if (best && best->count == entry->count) {
6368 /* Prev == entry, not good enough, have to keep searching */
6369 if (!prev->broken && prev->count == entry->count)
6373 best = (prev->count > entry->count) ? prev : entry;
6374 else if (best->count < entry->count)
6382 static int repair_ref(struct btrfs_fs_info *info, struct btrfs_path *path,
6383 struct data_backref *dback, struct extent_entry *entry)
6385 struct btrfs_trans_handle *trans;
6386 struct btrfs_root *root;
6387 struct btrfs_file_extent_item *fi;
6388 struct extent_buffer *leaf;
6389 struct btrfs_key key;
6393 key.objectid = dback->root;
6394 key.type = BTRFS_ROOT_ITEM_KEY;
6395 key.offset = (u64)-1;
6396 root = btrfs_read_fs_root(info, &key);
6398 fprintf(stderr, "Couldn't find root for our ref\n");
6403 * The backref points to the original offset of the extent if it was
6404 * split, so we need to search down to the offset we have and then walk
6405 * forward until we find the backref we're looking for.
6407 key.objectid = dback->owner;
6408 key.type = BTRFS_EXTENT_DATA_KEY;
6409 key.offset = dback->offset;
6410 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6412 fprintf(stderr, "Error looking up ref %d\n", ret);
6417 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
6418 ret = btrfs_next_leaf(root, path);
6420 fprintf(stderr, "Couldn't find our ref, next\n");
6424 leaf = path->nodes[0];
6425 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6426 if (key.objectid != dback->owner ||
6427 key.type != BTRFS_EXTENT_DATA_KEY) {
6428 fprintf(stderr, "Couldn't find our ref, search\n");
6431 fi = btrfs_item_ptr(leaf, path->slots[0],
6432 struct btrfs_file_extent_item);
6433 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6434 bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6436 if (bytenr == dback->disk_bytenr && bytes == dback->bytes)
6441 btrfs_release_path(path);
6443 trans = btrfs_start_transaction(root, 1);
6445 return PTR_ERR(trans);
6448 * Ok we have the key of the file extent we want to fix, now we can cow
6449 * down to the thing and fix it.
6451 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6453 fprintf(stderr, "Error cowing down to ref [%Lu, %u, %Lu]: %d\n",
6454 key.objectid, key.type, key.offset, ret);
6458 fprintf(stderr, "Well that's odd, we just found this key "
6459 "[%Lu, %u, %Lu]\n", key.objectid, key.type,
6464 leaf = path->nodes[0];
6465 fi = btrfs_item_ptr(leaf, path->slots[0],
6466 struct btrfs_file_extent_item);
6468 if (btrfs_file_extent_compression(leaf, fi) &&
6469 dback->disk_bytenr != entry->bytenr) {
6470 fprintf(stderr, "Ref doesn't match the record start and is "
6471 "compressed, please take a btrfs-image of this file "
6472 "system and send it to a btrfs developer so they can "
6473 "complete this functionality for bytenr %Lu\n",
6474 dback->disk_bytenr);
6479 if (dback->node.broken && dback->disk_bytenr != entry->bytenr) {
6480 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6481 } else if (dback->disk_bytenr > entry->bytenr) {
6482 u64 off_diff, offset;
6484 off_diff = dback->disk_bytenr - entry->bytenr;
6485 offset = btrfs_file_extent_offset(leaf, fi);
6486 if (dback->disk_bytenr + offset +
6487 btrfs_file_extent_num_bytes(leaf, fi) >
6488 entry->bytenr + entry->bytes) {
6489 fprintf(stderr, "Ref is past the entry end, please "
6490 "take a btrfs-image of this file system and "
6491 "send it to a btrfs developer, ref %Lu\n",
6492 dback->disk_bytenr);
6497 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6498 btrfs_set_file_extent_offset(leaf, fi, offset);
6499 } else if (dback->disk_bytenr < entry->bytenr) {
6502 offset = btrfs_file_extent_offset(leaf, fi);
6503 if (dback->disk_bytenr + offset < entry->bytenr) {
6504 fprintf(stderr, "Ref is before the entry start, please"
6505 " take a btrfs-image of this file system and "
6506 "send it to a btrfs developer, ref %Lu\n",
6507 dback->disk_bytenr);
6512 offset += dback->disk_bytenr;
6513 offset -= entry->bytenr;
6514 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6515 btrfs_set_file_extent_offset(leaf, fi, offset);
6518 btrfs_set_file_extent_disk_num_bytes(leaf, fi, entry->bytes);
6521 * Chances are if disk_num_bytes were wrong then so is ram_bytes, but
6522 * only do this if we aren't using compression, otherwise it's a
6525 if (!btrfs_file_extent_compression(leaf, fi))
6526 btrfs_set_file_extent_ram_bytes(leaf, fi, entry->bytes);
6528 printf("ram bytes may be wrong?\n");
6529 btrfs_mark_buffer_dirty(leaf);
6531 err = btrfs_commit_transaction(trans, root);
6532 btrfs_release_path(path);
6533 return ret ? ret : err;
6536 static int verify_backrefs(struct btrfs_fs_info *info, struct btrfs_path *path,
6537 struct extent_record *rec)
6539 struct extent_backref *back;
6540 struct data_backref *dback;
6541 struct extent_entry *entry, *best = NULL;
6544 int broken_entries = 0;
6549 * Metadata is easy and the backrefs should always agree on bytenr and
6550 * size, if not we've got bigger issues.
6555 list_for_each_entry(back, &rec->backrefs, list) {
6556 if (back->full_backref || !back->is_data)
6559 dback = (struct data_backref *)back;
6562 * We only pay attention to backrefs that we found a real
6565 if (dback->found_ref == 0)
6569 * For now we only catch when the bytes don't match, not the
6570 * bytenr. We can easily do this at the same time, but I want
6571 * to have a fs image to test on before we just add repair
6572 * functionality willy-nilly so we know we won't screw up the
6576 entry = find_entry(&entries, dback->disk_bytenr,
6579 entry = malloc(sizeof(struct extent_entry));
6584 memset(entry, 0, sizeof(*entry));
6585 entry->bytenr = dback->disk_bytenr;
6586 entry->bytes = dback->bytes;
6587 list_add_tail(&entry->list, &entries);
6592 * If we only have one entry we may think the entries agree when
6593 * in reality they don't, so we have to do some extra checking.
6595 if (dback->disk_bytenr != rec->start ||
6596 dback->bytes != rec->nr || back->broken)
6607 /* Yay all the backrefs agree, carry on good sir */
6608 if (nr_entries <= 1 && !mismatch)
6611 fprintf(stderr, "attempting to repair backref discrepency for bytenr "
6612 "%Lu\n", rec->start);
6615 * First we want to see if the backrefs can agree amongst themselves who
6616 * is right, so figure out which one of the entries has the highest
6619 best = find_most_right_entry(&entries);
6622 * Ok so we may have an even split between what the backrefs think, so
6623 * this is where we use the extent ref to see what it thinks.
6626 entry = find_entry(&entries, rec->start, rec->nr);
6627 if (!entry && (!broken_entries || !rec->found_rec)) {
6628 fprintf(stderr, "Backrefs don't agree with each other "
6629 "and extent record doesn't agree with anybody,"
6630 " so we can't fix bytenr %Lu bytes %Lu\n",
6631 rec->start, rec->nr);
6634 } else if (!entry) {
6636 * Ok our backrefs were broken, we'll assume this is the
6637 * correct value and add an entry for this range.
6639 entry = malloc(sizeof(struct extent_entry));
6644 memset(entry, 0, sizeof(*entry));
6645 entry->bytenr = rec->start;
6646 entry->bytes = rec->nr;
6647 list_add_tail(&entry->list, &entries);
6651 best = find_most_right_entry(&entries);
6653 fprintf(stderr, "Backrefs and extent record evenly "
6654 "split on who is right, this is going to "
6655 "require user input to fix bytenr %Lu bytes "
6656 "%Lu\n", rec->start, rec->nr);
6663 * I don't think this can happen currently as we'll abort() if we catch
6664 * this case higher up, but in case somebody removes that we still can't
6665 * deal with it properly here yet, so just bail out if that's the case.
6667 if (best->bytenr != rec->start) {
6668 fprintf(stderr, "Extent start and backref starts don't match, "
6669 "please use btrfs-image on this file system and send "
6670 "it to a btrfs developer so they can make fsck fix "
6671 "this particular case. bytenr is %Lu, bytes is %Lu\n",
6672 rec->start, rec->nr);
6678 * Ok great we all agreed on an extent record, let's go find the real
6679 * references and fix up the ones that don't match.
6681 list_for_each_entry(back, &rec->backrefs, list) {
6682 if (back->full_backref || !back->is_data)
6685 dback = (struct data_backref *)back;
6688 * Still ignoring backrefs that don't have a real ref attached
6691 if (dback->found_ref == 0)
6694 if (dback->bytes == best->bytes &&
6695 dback->disk_bytenr == best->bytenr)
6698 ret = repair_ref(info, path, dback, best);
6704 * Ok we messed with the actual refs, which means we need to drop our
6705 * entire cache and go back and rescan. I know this is a huge pain and
6706 * adds a lot of extra work, but it's the only way to be safe. Once all
6707 * the backrefs agree we may not need to do anything to the extent
6712 while (!list_empty(&entries)) {
6713 entry = list_entry(entries.next, struct extent_entry, list);
6714 list_del_init(&entry->list);
6720 static int process_duplicates(struct btrfs_root *root,
6721 struct cache_tree *extent_cache,
6722 struct extent_record *rec)
6724 struct extent_record *good, *tmp;
6725 struct cache_extent *cache;
6729 * If we found an extent record for this extent then return, or if we
6730 * have more than one duplicate we are likely going to need to delete
6733 if (rec->found_rec || rec->num_duplicates > 1)
6736 /* Shouldn't happen but just in case */
6737 BUG_ON(!rec->num_duplicates);
6740 * So this happens if we end up with a backref that doesn't match the
6741 * actual extent entry. So either the backref is bad or the extent
6742 * entry is bad. Either way we want to have the extent_record actually
6743 * reflect what we found in the extent_tree, so we need to take the
6744 * duplicate out and use that as the extent_record since the only way we
6745 * get a duplicate is if we find a real life BTRFS_EXTENT_ITEM_KEY.
6747 remove_cache_extent(extent_cache, &rec->cache);
6749 good = list_entry(rec->dups.next, struct extent_record, list);
6750 list_del_init(&good->list);
6751 INIT_LIST_HEAD(&good->backrefs);
6752 INIT_LIST_HEAD(&good->dups);
6753 good->cache.start = good->start;
6754 good->cache.size = good->nr;
6755 good->content_checked = 0;
6756 good->owner_ref_checked = 0;
6757 good->num_duplicates = 0;
6758 good->refs = rec->refs;
6759 list_splice_init(&rec->backrefs, &good->backrefs);
6761 cache = lookup_cache_extent(extent_cache, good->start,
6765 tmp = container_of(cache, struct extent_record, cache);
6768 * If we find another overlapping extent and its found_rec is
6769 * set then it's a duplicate and we need to try and delete
6772 if (tmp->found_rec || tmp->num_duplicates > 0) {
6773 if (list_empty(&good->list))
6774 list_add_tail(&good->list,
6775 &duplicate_extents);
6776 good->num_duplicates += tmp->num_duplicates + 1;
6777 list_splice_init(&tmp->dups, &good->dups);
6778 list_del_init(&tmp->list);
6779 list_add_tail(&tmp->list, &good->dups);
6780 remove_cache_extent(extent_cache, &tmp->cache);
6785 * Ok, we have another extent rec that is not backed by an extent item,
6786 * so let's just add it to this extent and carry on like we did above.
6788 good->refs += tmp->refs;
6789 list_splice_init(&tmp->backrefs, &good->backrefs);
6790 remove_cache_extent(extent_cache, &tmp->cache);
6793 ret = insert_cache_extent(extent_cache, &good->cache);
6796 return good->num_duplicates ? 0 : 1;
6799 static int delete_duplicate_records(struct btrfs_root *root,
6800 struct extent_record *rec)
6802 struct btrfs_trans_handle *trans;
6803 LIST_HEAD(delete_list);
6804 struct btrfs_path *path;
6805 struct extent_record *tmp, *good, *n;
6808 struct btrfs_key key;
6810 path = btrfs_alloc_path();
6817 /* Find the record that covers all of the duplicates. */
6818 list_for_each_entry(tmp, &rec->dups, list) {
6819 if (good->start < tmp->start)
6821 if (good->nr > tmp->nr)
6824 if (tmp->start + tmp->nr < good->start + good->nr) {
6825 fprintf(stderr, "Ok we have overlapping extents that "
6826 "aren't completely covered by eachother, this "
6827 "is going to require more careful thought. "
6828 "The extents are [%Lu-%Lu] and [%Lu-%Lu]\n",
6829 tmp->start, tmp->nr, good->start, good->nr);
6836 list_add_tail(&rec->list, &delete_list);
6838 list_for_each_entry_safe(tmp, n, &rec->dups, list) {
6841 list_move_tail(&tmp->list, &delete_list);
6844 root = root->fs_info->extent_root;
6845 trans = btrfs_start_transaction(root, 1);
6846 if (IS_ERR(trans)) {
6847 ret = PTR_ERR(trans);
6851 list_for_each_entry(tmp, &delete_list, list) {
6852 if (tmp->found_rec == 0)
6854 key.objectid = tmp->start;
6855 key.type = BTRFS_EXTENT_ITEM_KEY;
6856 key.offset = tmp->nr;
6858 /* Shouldn't happen but just in case */
6859 if (tmp->metadata) {
6860 fprintf(stderr, "Well this shouldn't happen, extent "
6861 "record overlaps but is metadata? "
6862 "[%Lu, %Lu]\n", tmp->start, tmp->nr);
6866 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
6872 ret = btrfs_del_item(trans, root, path);
6875 btrfs_release_path(path);
6878 err = btrfs_commit_transaction(trans, root);
6882 while (!list_empty(&delete_list)) {
6883 tmp = list_entry(delete_list.next, struct extent_record, list);
6884 list_del_init(&tmp->list);
6890 while (!list_empty(&rec->dups)) {
6891 tmp = list_entry(rec->dups.next, struct extent_record, list);
6892 list_del_init(&tmp->list);
6896 btrfs_free_path(path);
6898 if (!ret && !nr_del)
6899 rec->num_duplicates = 0;
6901 return ret ? ret : nr_del;
6904 static int find_possible_backrefs(struct btrfs_fs_info *info,
6905 struct btrfs_path *path,
6906 struct cache_tree *extent_cache,
6907 struct extent_record *rec)
6909 struct btrfs_root *root;
6910 struct extent_backref *back;
6911 struct data_backref *dback;
6912 struct cache_extent *cache;
6913 struct btrfs_file_extent_item *fi;
6914 struct btrfs_key key;
6918 list_for_each_entry(back, &rec->backrefs, list) {
6919 /* Don't care about full backrefs (poor unloved backrefs) */
6920 if (back->full_backref || !back->is_data)
6923 dback = (struct data_backref *)back;
6925 /* We found this one, we don't need to do a lookup */
6926 if (dback->found_ref)
6929 key.objectid = dback->root;
6930 key.type = BTRFS_ROOT_ITEM_KEY;
6931 key.offset = (u64)-1;
6933 root = btrfs_read_fs_root(info, &key);
6935 /* No root, definitely a bad ref, skip */
6936 if (IS_ERR(root) && PTR_ERR(root) == -ENOENT)
6938 /* Other err, exit */
6940 return PTR_ERR(root);
6942 key.objectid = dback->owner;
6943 key.type = BTRFS_EXTENT_DATA_KEY;
6944 key.offset = dback->offset;
6945 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6947 btrfs_release_path(path);
6950 /* Didn't find it, we can carry on */
6955 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6956 struct btrfs_file_extent_item);
6957 bytenr = btrfs_file_extent_disk_bytenr(path->nodes[0], fi);
6958 bytes = btrfs_file_extent_disk_num_bytes(path->nodes[0], fi);
6959 btrfs_release_path(path);
6960 cache = lookup_cache_extent(extent_cache, bytenr, 1);
6962 struct extent_record *tmp;
6963 tmp = container_of(cache, struct extent_record, cache);
6966 * If we found an extent record for the bytenr for this
6967 * particular backref then we can't add it to our
6968 * current extent record. We only want to add backrefs
6969 * that don't have a corresponding extent item in the
6970 * extent tree, since those likely belong to this record
6971 * and we need to fix things up if the bytenrs don't match.
6977 dback->found_ref += 1;
6978 dback->disk_bytenr = bytenr;
6979 dback->bytes = bytes;
6982 * Set this so the verify backref code knows not to trust the
6983 * values in this backref.
6992 * Record orphan data refs into the corresponding root.
6994 * Return 0 if the extent item contains a data ref and it was recorded.
6995 * Return 1 if the extent item contains no useful data ref.
6996 * In that case, it may contain only a shared data ref or metadata backrefs,
6997 * or the file extent exists (this should be handled by the extent bytenr
6999 * Return <0 if something goes wrong.
7001 static int record_orphan_data_extents(struct btrfs_fs_info *fs_info,
7002 struct extent_record *rec)
7004 struct btrfs_key key;
7005 struct btrfs_root *dest_root;
7006 struct extent_backref *back;
7007 struct data_backref *dback;
7008 struct orphan_data_extent *orphan;
7009 struct btrfs_path *path;
7010 int recorded_data_ref = 0;
7015 path = btrfs_alloc_path();
7018 list_for_each_entry(back, &rec->backrefs, list) {
7019 if (back->full_backref || !back->is_data ||
7020 !back->found_extent_tree)
7022 dback = (struct data_backref *)back;
7023 if (dback->found_ref)
7025 key.objectid = dback->root;
7026 key.type = BTRFS_ROOT_ITEM_KEY;
7027 key.offset = (u64)-1;
7029 dest_root = btrfs_read_fs_root(fs_info, &key);
7031 /* For a non-existent root we just skip it */
7032 if (IS_ERR(dest_root) || !dest_root)
7035 key.objectid = dback->owner;
7036 key.type = BTRFS_EXTENT_DATA_KEY;
7037 key.offset = dback->offset;
7039 ret = btrfs_search_slot(NULL, dest_root, &key, path, 0, 0);
7041 * For ret < 0, it's OK since the fs-tree may be corrupted,
7042 * we need to record it for inode/file extent rebuild.
7043 * For ret > 0, we record it only for file extent rebuild.
7044 * For ret == 0, the file extent exists but only bytenr
7045 * mismatch, let the original bytenr fix routine handle it,
7051 orphan = malloc(sizeof(*orphan));
7056 INIT_LIST_HEAD(&orphan->list);
7057 orphan->root = dback->root;
7058 orphan->objectid = dback->owner;
7059 orphan->offset = dback->offset;
7060 orphan->disk_bytenr = rec->cache.start;
7061 orphan->disk_len = rec->cache.size;
7062 list_add(&dest_root->orphan_data_extents, &orphan->list);
7063 recorded_data_ref = 1;
7066 btrfs_free_path(path);
7068 return !recorded_data_ref;
7074 * when an incorrect extent item is found, this will delete
7075 * all of the existing entries for it and recreate them
7076 * based on what the tree scan found.
7078 static int fixup_extent_refs(struct btrfs_fs_info *info,
7079 struct cache_tree *extent_cache,
7080 struct extent_record *rec)
7082 struct btrfs_trans_handle *trans = NULL;
7084 struct btrfs_path *path;
7085 struct list_head *cur = rec->backrefs.next;
7086 struct cache_extent *cache;
7087 struct extent_backref *back;
7091 if (rec->flag_block_full_backref)
7092 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7094 path = btrfs_alloc_path();
7098 if (rec->refs != rec->extent_item_refs && !rec->metadata) {
7100 * Sometimes the backrefs themselves are so broken they don't
7101 * get attached to any meaningful rec, so first go back and
7102 * check any of our backrefs that we couldn't find; if we do find
7103 * the backref, throw it into the list so that
7104 * verify_backrefs() can figure out what to do.
7106 ret = find_possible_backrefs(info, path, extent_cache, rec);
7111 /* step one, make sure all of the backrefs agree */
7112 ret = verify_backrefs(info, path, rec);
7116 trans = btrfs_start_transaction(info->extent_root, 1);
7117 if (IS_ERR(trans)) {
7118 ret = PTR_ERR(trans);
7122 /* step two, delete all the existing records */
7123 ret = delete_extent_records(trans, info->extent_root, path,
7124 rec->start, rec->max_size);
7129 /* was this block corrupt? If so, don't add references to it */
7130 cache = lookup_cache_extent(info->corrupt_blocks,
7131 rec->start, rec->max_size);
7137 /* step three, recreate all the refs we did find */
7138 while(cur != &rec->backrefs) {
7139 back = list_entry(cur, struct extent_backref, list);
7143 * if we didn't find any references, don't create a
7146 if (!back->found_ref)
7149 rec->bad_full_backref = 0;
7150 ret = record_extent(trans, info, path, rec, back, allocated, flags);
7158 int err = btrfs_commit_transaction(trans, info->extent_root);
7163 btrfs_free_path(path);
7167 static int fixup_extent_flags(struct btrfs_fs_info *fs_info,
7168 struct extent_record *rec)
7170 struct btrfs_trans_handle *trans;
7171 struct btrfs_root *root = fs_info->extent_root;
7172 struct btrfs_path *path;
7173 struct btrfs_extent_item *ei;
7174 struct btrfs_key key;
7178 key.objectid = rec->start;
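/*
 * Metadata extents may be tracked with skinny METADATA_ITEM keys, whose
 * offset is the tree level rather than the extent size.
 */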
7179 if (rec->metadata) {
7180 key.type = BTRFS_METADATA_ITEM_KEY;
7181 key.offset = rec->info_level;
7183 key.type = BTRFS_EXTENT_ITEM_KEY;
7184 key.offset = rec->max_size;
7187 path = btrfs_alloc_path();
7191 trans = btrfs_start_transaction(root, 0);
7192 if (IS_ERR(trans)) {
7193 btrfs_free_path(path);
7194 return PTR_ERR(trans);
7197 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
7199 btrfs_free_path(path);
7200 btrfs_commit_transaction(trans, root);
7203 fprintf(stderr, "Didn't find extent for %llu\n",
7204 (unsigned long long)rec->start);
7205 btrfs_free_path(path);
7206 btrfs_commit_transaction(trans, root);
7210 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
7211 struct btrfs_extent_item);
7212 flags = btrfs_extent_flags(path->nodes[0], ei);
7213 if (rec->flag_block_full_backref) {
7214 fprintf(stderr, "setting full backref on %llu\n",
7215 (unsigned long long)key.objectid);
7216 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7218 fprintf(stderr, "clearing full backref on %llu\n",
7219 (unsigned long long)key.objectid);
7220 flags &= ~BTRFS_BLOCK_FLAG_FULL_BACKREF;
7222 btrfs_set_extent_flags(path->nodes[0], ei, flags);
7223 btrfs_mark_buffer_dirty(path->nodes[0]);
7224 btrfs_free_path(path);
7225 return btrfs_commit_transaction(trans, root);
7228 /* right now we only prune from the extent allocation tree */
7229 static int prune_one_block(struct btrfs_trans_handle *trans,
7230 struct btrfs_fs_info *info,
7231 struct btrfs_corrupt_block *corrupt)
7234 struct btrfs_path path;
7235 struct extent_buffer *eb;
7239 int level = corrupt->level + 1;
7241 btrfs_init_path(&path);
7243 /* we want to stop at the parent to our busted block */
7244 path.lowest_level = level;
7246 ret = btrfs_search_slot(trans, info->extent_root,
7247 &corrupt->key, &path, -1, 1);
7252 eb = path.nodes[level];
7259 * hopefully the search gave us the block we want to prune,
7260 * lets try that first
7262 slot = path.slots[level];
7263 found = btrfs_node_blockptr(eb, slot);
7264 if (found == corrupt->cache.start)
7267 nritems = btrfs_header_nritems(eb);
7269 /* the search failed, lets scan this node and hope we find it */
7270 for (slot = 0; slot < nritems; slot++) {
7271 found = btrfs_node_blockptr(eb, slot);
7272 if (found == corrupt->cache.start)
7276 * we couldn't find the bad block. TODO, search all the nodes for pointers
7279 if (eb == info->extent_root->node) {
7284 btrfs_release_path(&path);
7289 printk("deleting pointer to block %Lu\n", corrupt->cache.start);
7290 ret = btrfs_del_ptr(trans, info->extent_root, &path, level, slot);
7293 btrfs_release_path(&path);
7297 static int prune_corrupt_blocks(struct btrfs_fs_info *info)
7299 struct btrfs_trans_handle *trans = NULL;
7300 struct cache_extent *cache;
7301 struct btrfs_corrupt_block *corrupt;
7304 cache = search_cache_extent(info->corrupt_blocks, 0);
7308 trans = btrfs_start_transaction(info->extent_root, 1);
7310 return PTR_ERR(trans);
7312 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
7313 prune_one_block(trans, info, corrupt);
7314 remove_cache_extent(info->corrupt_blocks, cache);
7317 return btrfs_commit_transaction(trans, info->extent_root);
7321 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info)
7323 struct btrfs_block_group_cache *cache;
7328 ret = find_first_extent_bit(&fs_info->free_space_cache, 0,
7329 &start, &end, EXTENT_DIRTY);
7332 clear_extent_dirty(&fs_info->free_space_cache, start, end,
7338 cache = btrfs_lookup_first_block_group(fs_info, start);
7343 start = cache->key.objectid + cache->key.offset;
7347 static int check_extent_refs(struct btrfs_root *root,
7348 struct cache_tree *extent_cache)
7350 struct extent_record *rec;
7351 struct cache_extent *cache;
7360 * if we're doing a repair, we have to make sure
7361 * we don't allocate from the problem extents.
7362 * In the worst case, this will be all the
7365 cache = search_cache_extent(extent_cache, 0);
7367 rec = container_of(cache, struct extent_record, cache);
7368 set_extent_dirty(root->fs_info->excluded_extents,
7370 rec->start + rec->max_size - 1,
7372 cache = next_cache_extent(cache);
7375 /* pin down all the corrupted blocks too */
7376 cache = search_cache_extent(root->fs_info->corrupt_blocks, 0);
7378 set_extent_dirty(root->fs_info->excluded_extents,
7380 cache->start + cache->size - 1,
7382 cache = next_cache_extent(cache);
7384 prune_corrupt_blocks(root->fs_info);
7385 reset_cached_block_groups(root->fs_info);
7388 reset_cached_block_groups(root->fs_info);
7391 * We need to delete any duplicate entries we find first, otherwise we
7392 * could mess up the extent tree when we have backrefs that actually
7393 * belong to a different extent item and not the weird duplicate one.
7395 while (repair && !list_empty(&duplicate_extents)) {
7396 rec = list_entry(duplicate_extents.next, struct extent_record,
7398 list_del_init(&rec->list);
7400 /* Sometimes we can find a backref before we find an actual
7401 * extent, so we need to process it a little bit to see if there
7402 * truly are multiple EXTENT_ITEM_KEYs for the same range, or
7403 * if this is a backref screwup. If we need to delete stuff,
7404 * process_duplicates() will return 0, otherwise it will return
7407 if (process_duplicates(root, extent_cache, rec))
7409 ret = delete_duplicate_records(root, rec);
7413 * delete_duplicate_records will return the number of entries
7414 * deleted, so if it's greater than 0 then we know we actually
7415 * did something and we need to remove.
7429 cache = search_cache_extent(extent_cache, 0);
7432 rec = container_of(cache, struct extent_record, cache);
7433 if (rec->num_duplicates) {
7434 fprintf(stderr, "extent item %llu has multiple extent "
7435 "items\n", (unsigned long long)rec->start);
7440 if (rec->refs != rec->extent_item_refs) {
7441 fprintf(stderr, "ref mismatch on [%llu %llu] ",
7442 (unsigned long long)rec->start,
7443 (unsigned long long)rec->nr);
7444 fprintf(stderr, "extent item %llu, found %llu\n",
7445 (unsigned long long)rec->extent_item_refs,
7446 (unsigned long long)rec->refs);
7447 ret = record_orphan_data_extents(root->fs_info, rec);
7454 * we can't use the extent to repair the file
7455 * extent; let the fallback method handle it.
7457 if (!fixed && repair) {
7458 ret = fixup_extent_refs(
7469 if (all_backpointers_checked(rec, 1)) {
7470 fprintf(stderr, "backpointer mismatch on [%llu %llu]\n",
7471 (unsigned long long)rec->start,
7472 (unsigned long long)rec->nr);
7474 if (!fixed && !recorded && repair) {
7475 ret = fixup_extent_refs(root->fs_info,
7484 if (!rec->owner_ref_checked) {
7485 fprintf(stderr, "owner ref check failed [%llu %llu]\n",
7486 (unsigned long long)rec->start,
7487 (unsigned long long)rec->nr);
7488 if (!fixed && !recorded && repair) {
7489 ret = fixup_extent_refs(root->fs_info,
7498 if (rec->bad_full_backref) {
7499 fprintf(stderr, "bad full backref, on [%llu]\n",
7500 (unsigned long long)rec->start);
7502 ret = fixup_extent_flags(root->fs_info, rec);
7511 * Although it's not an extent ref problem, we reuse this
7512 * routine for error reporting.
7513 * No repair function yet.
7515 if (rec->crossing_stripes) {
7517 "bad metadata [%llu, %llu) crossing stripe boundary\n",
7518 rec->start, rec->start + rec->max_size);
7523 remove_cache_extent(extent_cache, cache);
7524 free_all_extent_backrefs(rec);
7525 if (!init_extent_tree && repair && (!cur_err || fixed))
7526 clear_extent_dirty(root->fs_info->excluded_extents,
7528 rec->start + rec->max_size - 1,
7534 if (ret && ret != -EAGAIN) {
7535 fprintf(stderr, "failed to repair damaged filesystem, aborting\n");
7538 struct btrfs_trans_handle *trans;
7540 root = root->fs_info->extent_root;
7541 trans = btrfs_start_transaction(root, 1);
7542 if (IS_ERR(trans)) {
7543 ret = PTR_ERR(trans);
7547 btrfs_fix_block_accounting(trans, root);
7548 ret = btrfs_commit_transaction(trans, root);
7553 fprintf(stderr, "repaired damaged extent references\n");
7559 u64 calc_stripe_length(u64 type, u64 length, int num_stripes)
7563 if (type & BTRFS_BLOCK_GROUP_RAID0) {
7564 stripe_size = length;
7565 stripe_size /= num_stripes;
7566 } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
7567 stripe_size = length * 2;
7568 stripe_size /= num_stripes;
7569 } else if (type & BTRFS_BLOCK_GROUP_RAID5) {
7570 stripe_size = length;
7571 stripe_size /= (num_stripes - 1);
7572 } else if (type & BTRFS_BLOCK_GROUP_RAID6) {
7573 stripe_size = length;
7574 stripe_size /= (num_stripes - 2);
7576 stripe_size = length;
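/*
 * A quick worked example of the math above, with illustrative numbers only:
 * for a 4GiB RAID10 chunk striped over 4 devices, each device extent is
 * length * 2 / num_stripes = 2GiB, because half of the stripes are copies.
 * For RAID5/6 the parity stripes are excluded, so a 3GiB RAID5 chunk over
 * 4 devices gives length / (num_stripes - 1) = 1GiB per device.
 */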
7582 * Check the chunk with its block group/dev list ref:
7583 * Return 0 if all refs seem valid.
7584 * Return 1 if only part of the refs are valid; a later check is needed to
7585 * rebuild the missing ones (e.g. a missing block group) from the extent tree.
7586 * Return -1 if essential refs are missing and unable to rebuild.
7588 static int check_chunk_refs(struct chunk_record *chunk_rec,
7589 struct block_group_tree *block_group_cache,
7590 struct device_extent_tree *dev_extent_cache,
7593 struct cache_extent *block_group_item;
7594 struct block_group_record *block_group_rec;
7595 struct cache_extent *dev_extent_item;
7596 struct device_extent_record *dev_extent_rec;
7600 int metadump_v2 = 0;
7604 block_group_item = lookup_cache_extent(&block_group_cache->tree,
7607 if (block_group_item) {
7608 block_group_rec = container_of(block_group_item,
7609 struct block_group_record,
7611 if (chunk_rec->length != block_group_rec->offset ||
7612 chunk_rec->offset != block_group_rec->objectid ||
7614 chunk_rec->type_flags != block_group_rec->flags)) {
7617 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) mismatch with block group[%llu, %u, %llu]: offset(%llu), objectid(%llu), flags(%llu)\n",
7618 chunk_rec->objectid,
7623 chunk_rec->type_flags,
7624 block_group_rec->objectid,
7625 block_group_rec->type,
7626 block_group_rec->offset,
7627 block_group_rec->offset,
7628 block_group_rec->objectid,
7629 block_group_rec->flags);
7632 list_del_init(&block_group_rec->list);
7633 chunk_rec->bg_rec = block_group_rec;
7638 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) is not found in block group\n",
7639 chunk_rec->objectid,
7644 chunk_rec->type_flags);
7651 length = calc_stripe_length(chunk_rec->type_flags, chunk_rec->length,
7652 chunk_rec->num_stripes);
7653 for (i = 0; i < chunk_rec->num_stripes; ++i) {
7654 devid = chunk_rec->stripes[i].devid;
7655 offset = chunk_rec->stripes[i].offset;
7656 dev_extent_item = lookup_cache_extent2(&dev_extent_cache->tree,
7657 devid, offset, length);
7658 if (dev_extent_item) {
7659 dev_extent_rec = container_of(dev_extent_item,
7660 struct device_extent_record,
7662 if (dev_extent_rec->objectid != devid ||
7663 dev_extent_rec->offset != offset ||
7664 dev_extent_rec->chunk_offset != chunk_rec->offset ||
7665 dev_extent_rec->length != length) {
7668 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] dismatch dev extent[%llu, %llu, %llu]\n",
7669 chunk_rec->objectid,
7672 chunk_rec->stripes[i].devid,
7673 chunk_rec->stripes[i].offset,
7674 dev_extent_rec->objectid,
7675 dev_extent_rec->offset,
7676 dev_extent_rec->length);
7679 list_move(&dev_extent_rec->chunk_list,
7680 &chunk_rec->dextents);
7685 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] is not found in dev extent\n",
7686 chunk_rec->objectid,
7689 chunk_rec->stripes[i].devid,
7690 chunk_rec->stripes[i].offset);
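/*
 * To summarise the checks above: each chunk is cross-checked against two
 * kinds of references, the block group item that must cover exactly the
 * same [offset, offset + length) range with matching type flags, and one
 * dev extent per stripe whose length is the per-device stripe length from
 * calc_stripe_length().  Matching records are moved onto the chunk's own
 * lists so the leftovers can be reported as orphans later.
 */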
7697 /* check btrfs_chunk -> btrfs_dev_extent / btrfs_block_group_item */
7698 int check_chunks(struct cache_tree *chunk_cache,
7699 struct block_group_tree *block_group_cache,
7700 struct device_extent_tree *dev_extent_cache,
7701 struct list_head *good, struct list_head *bad,
7702 struct list_head *rebuild, int silent)
7704 struct cache_extent *chunk_item;
7705 struct chunk_record *chunk_rec;
7706 struct block_group_record *bg_rec;
7707 struct device_extent_record *dext_rec;
7711 chunk_item = first_cache_extent(chunk_cache);
7712 while (chunk_item) {
7713 chunk_rec = container_of(chunk_item, struct chunk_record,
7715 err = check_chunk_refs(chunk_rec, block_group_cache,
7716 dev_extent_cache, silent);
7719 if (err == 0 && good)
7720 list_add_tail(&chunk_rec->list, good);
7721 if (err > 0 && rebuild)
7722 list_add_tail(&chunk_rec->list, rebuild);
7724 list_add_tail(&chunk_rec->list, bad);
7725 chunk_item = next_cache_extent(chunk_item);
7728 list_for_each_entry(bg_rec, &block_group_cache->block_groups, list) {
7731 "Block group[%llu, %llu] (flags = %llu) didn't find the relative chunk.\n",
7739 list_for_each_entry(dext_rec, &dev_extent_cache->no_chunk_orphans,
7743 "Device extent[%llu, %llu, %llu] didn't find the relative chunk.\n",
7754 static int check_device_used(struct device_record *dev_rec,
7755 struct device_extent_tree *dext_cache)
7757 struct cache_extent *cache;
7758 struct device_extent_record *dev_extent_rec;
7761 cache = search_cache_extent2(&dext_cache->tree, dev_rec->devid, 0);
7763 dev_extent_rec = container_of(cache,
7764 struct device_extent_record,
7766 if (dev_extent_rec->objectid != dev_rec->devid)
7769 list_del_init(&dev_extent_rec->device_list);
7770 total_byte += dev_extent_rec->length;
7771 cache = next_cache_extent(cache);
7774 if (total_byte != dev_rec->byte_used) {
7776 "Dev extent's total-byte(%llu) is not equal to byte-used(%llu) in dev[%llu, %u, %llu]\n",
7777 total_byte, dev_rec->byte_used, dev_rec->objectid,
7778 dev_rec->type, dev_rec->offset);
7785 /* check btrfs_dev_item -> btrfs_dev_extent */
7786 static int check_devices(struct rb_root *dev_cache,
7787 struct device_extent_tree *dev_extent_cache)
7789 struct rb_node *dev_node;
7790 struct device_record *dev_rec;
7791 struct device_extent_record *dext_rec;
7795 dev_node = rb_first(dev_cache);
7797 dev_rec = container_of(dev_node, struct device_record, node);
7798 err = check_device_used(dev_rec, dev_extent_cache);
7802 dev_node = rb_next(dev_node);
7804 list_for_each_entry(dext_rec, &dev_extent_cache->no_device_orphans,
7807 "Device extent[%llu, %llu, %llu] didn't find its device.\n",
7808 dext_rec->objectid, dext_rec->offset, dext_rec->length);
7815 static int add_root_item_to_list(struct list_head *head,
7816 u64 objectid, u64 bytenr, u64 last_snapshot,
7817 u8 level, u8 drop_level,
7818 int level_size, struct btrfs_key *drop_key)
7821 struct root_item_record *ri_rec;
7822 ri_rec = malloc(sizeof(*ri_rec));
7825 ri_rec->bytenr = bytenr;
7826 ri_rec->objectid = objectid;
7827 ri_rec->level = level;
7828 ri_rec->level_size = level_size;
7829 ri_rec->drop_level = drop_level;
7830 ri_rec->last_snapshot = last_snapshot;
7832 memcpy(&ri_rec->drop_key, drop_key, sizeof(*drop_key));
7833 list_add_tail(&ri_rec->list, head);
7838 static void free_root_item_list(struct list_head *list)
7840 struct root_item_record *ri_rec;
7842 while (!list_empty(list)) {
7843 ri_rec = list_first_entry(list, struct root_item_record,
7845 list_del_init(&ri_rec->list);
7850 static int deal_root_from_list(struct list_head *list,
7851 struct btrfs_root *root,
7852 struct block_info *bits,
7854 struct cache_tree *pending,
7855 struct cache_tree *seen,
7856 struct cache_tree *reada,
7857 struct cache_tree *nodes,
7858 struct cache_tree *extent_cache,
7859 struct cache_tree *chunk_cache,
7860 struct rb_root *dev_cache,
7861 struct block_group_tree *block_group_cache,
7862 struct device_extent_tree *dev_extent_cache)
7867 while (!list_empty(list)) {
7868 struct root_item_record *rec;
7869 struct extent_buffer *buf;
7870 rec = list_entry(list->next,
7871 struct root_item_record, list);
7873 buf = read_tree_block(root->fs_info->tree_root,
7874 rec->bytenr, rec->level_size, 0);
7875 if (!extent_buffer_uptodate(buf)) {
7876 free_extent_buffer(buf);
7880 add_root_to_pending(buf, extent_cache, pending,
7881 seen, nodes, rec->objectid);
7883 * To rebuild the extent tree, we need to deal with snapshots
7884 * one by one; otherwise we process nodes first, which
7885 * maximizes readahead.
7888 ret = run_next_block(root, bits, bits_nr, &last,
7889 pending, seen, reada, nodes,
7890 extent_cache, chunk_cache,
7891 dev_cache, block_group_cache,
7892 dev_extent_cache, rec);
7896 free_extent_buffer(buf);
7897 list_del(&rec->list);
7903 ret = run_next_block(root, bits, bits_nr, &last, pending, seen,
7904 reada, nodes, extent_cache, chunk_cache,
7905 dev_cache, block_group_cache,
7906 dev_extent_cache, NULL);
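/*
 * check_chunks_and_extents() below feeds this helper twice: once with the
 * list of normal trees (drop_progress still zero) and once with the trees
 * that were in the middle of being dropped, so that partially deleted
 * snapshots are walked with their saved drop key instead of from scratch.
 */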
7916 static int check_chunks_and_extents(struct btrfs_root *root)
7918 struct rb_root dev_cache;
7919 struct cache_tree chunk_cache;
7920 struct block_group_tree block_group_cache;
7921 struct device_extent_tree dev_extent_cache;
7922 struct cache_tree extent_cache;
7923 struct cache_tree seen;
7924 struct cache_tree pending;
7925 struct cache_tree reada;
7926 struct cache_tree nodes;
7927 struct extent_io_tree excluded_extents;
7928 struct cache_tree corrupt_blocks;
7929 struct btrfs_path path;
7930 struct btrfs_key key;
7931 struct btrfs_key found_key;
7933 struct block_info *bits;
7935 struct extent_buffer *leaf;
7937 struct btrfs_root_item ri;
7938 struct list_head dropping_trees;
7939 struct list_head normal_trees;
7940 struct btrfs_root *root1;
7945 dev_cache = RB_ROOT;
7946 cache_tree_init(&chunk_cache);
7947 block_group_tree_init(&block_group_cache);
7948 device_extent_tree_init(&dev_extent_cache);
7950 cache_tree_init(&extent_cache);
7951 cache_tree_init(&seen);
7952 cache_tree_init(&pending);
7953 cache_tree_init(&nodes);
7954 cache_tree_init(&reada);
7955 cache_tree_init(&corrupt_blocks);
7956 extent_io_tree_init(&excluded_extents);
7957 INIT_LIST_HEAD(&dropping_trees);
7958 INIT_LIST_HEAD(&normal_trees);
7961 root->fs_info->excluded_extents = &excluded_extents;
7962 root->fs_info->fsck_extent_cache = &extent_cache;
7963 root->fs_info->free_extent_hook = free_extent_hook;
7964 root->fs_info->corrupt_blocks = &corrupt_blocks;
7968 bits = malloc(bits_nr * sizeof(struct block_info));
7975 root1 = root->fs_info->tree_root;
7976 level = btrfs_header_level(root1->node);
7977 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
7978 root1->node->start, 0, level, 0,
7979 btrfs_level_size(root1, level), NULL);
7982 root1 = root->fs_info->chunk_root;
7983 level = btrfs_header_level(root1->node);
7984 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
7985 root1->node->start, 0, level, 0,
7986 btrfs_level_size(root1, level), NULL);
7989 btrfs_init_path(&path);
7992 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
7993 ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
7998 leaf = path.nodes[0];
7999 slot = path.slots[0];
8000 if (slot >= btrfs_header_nritems(path.nodes[0])) {
8001 ret = btrfs_next_leaf(root, &path);
8004 leaf = path.nodes[0];
8005 slot = path.slots[0];
8007 btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
8008 if (btrfs_key_type(&found_key) == BTRFS_ROOT_ITEM_KEY) {
8009 unsigned long offset;
8012 offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
8013 read_extent_buffer(leaf, &ri, offset, sizeof(ri));
8014 last_snapshot = btrfs_root_last_snapshot(&ri);
8015 if (btrfs_disk_key_objectid(&ri.drop_progress) == 0) {
8016 level = btrfs_root_level(&ri);
8017 level_size = btrfs_level_size(root, level);
8018 ret = add_root_item_to_list(&normal_trees,
8020 btrfs_root_bytenr(&ri),
8021 last_snapshot, level,
8022 0, level_size, NULL);
8026 level = btrfs_root_level(&ri);
8027 level_size = btrfs_level_size(root, level);
8028 objectid = found_key.objectid;
8029 btrfs_disk_key_to_cpu(&found_key,
8031 ret = add_root_item_to_list(&dropping_trees,
8033 btrfs_root_bytenr(&ri),
8034 last_snapshot, level,
8036 level_size, &found_key);
8043 btrfs_release_path(&path);
8046 * check_block can return -EAGAIN if it fixes something. Please keep
8047 * this in mind when dealing with return values from these functions: if
8048 * we get -EAGAIN we want to fall through and restart the loop.
8050 ret = deal_root_from_list(&normal_trees, root, bits, bits_nr, &pending,
8051 &seen, &reada, &nodes, &extent_cache,
8052 &chunk_cache, &dev_cache, &block_group_cache,
8059 ret = deal_root_from_list(&dropping_trees, root, bits, bits_nr,
8060 &pending, &seen, &reada, &nodes,
8061 &extent_cache, &chunk_cache, &dev_cache,
8062 &block_group_cache, &dev_extent_cache);
8069 err = check_chunks(&chunk_cache, &block_group_cache,
8070 &dev_extent_cache, NULL, NULL, NULL, 0);
8078 ret = check_extent_refs(root, &extent_cache);
8085 err = check_devices(&dev_cache, &dev_extent_cache);
8091 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
8092 extent_io_tree_cleanup(&excluded_extents);
8093 root->fs_info->fsck_extent_cache = NULL;
8094 root->fs_info->free_extent_hook = NULL;
8095 root->fs_info->corrupt_blocks = NULL;
8096 root->fs_info->excluded_extents = NULL;
8099 free_chunk_cache_tree(&chunk_cache);
8100 free_device_cache_tree(&dev_cache);
8101 free_block_group_tree(&block_group_cache);
8102 free_device_extent_tree(&dev_extent_cache);
8103 free_extent_cache_tree(&seen);
8104 free_extent_cache_tree(&pending);
8105 free_extent_cache_tree(&reada);
8106 free_extent_cache_tree(&nodes);
8109 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
8110 free_extent_cache_tree(&seen);
8111 free_extent_cache_tree(&pending);
8112 free_extent_cache_tree(&reada);
8113 free_extent_cache_tree(&nodes);
8114 free_chunk_cache_tree(&chunk_cache);
8115 free_block_group_tree(&block_group_cache);
8116 free_device_cache_tree(&dev_cache);
8117 free_device_extent_tree(&dev_extent_cache);
8118 free_extent_record_cache(root->fs_info, &extent_cache);
8119 free_root_item_list(&normal_trees);
8120 free_root_item_list(&dropping_trees);
8121 extent_io_tree_cleanup(&excluded_extents);
8125 static int btrfs_fsck_reinit_root(struct btrfs_trans_handle *trans,
8126 struct btrfs_root *root, int overwrite)
8128 struct extent_buffer *c;
8129 struct extent_buffer *old = root->node;
8132 struct btrfs_disk_key disk_key = {0,0,0};
8138 extent_buffer_get(c);
8141 c = btrfs_alloc_free_block(trans, root,
8142 btrfs_level_size(root, 0),
8143 root->root_key.objectid,
8144 &disk_key, level, 0, 0);
8147 extent_buffer_get(c);
8151 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
8152 btrfs_set_header_level(c, level);
8153 btrfs_set_header_bytenr(c, c->start);
8154 btrfs_set_header_generation(c, trans->transid);
8155 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
8156 btrfs_set_header_owner(c, root->root_key.objectid);
8158 write_extent_buffer(c, root->fs_info->fsid,
8159 btrfs_header_fsid(), BTRFS_FSID_SIZE);
8161 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
8162 btrfs_header_chunk_tree_uuid(c),
8165 btrfs_mark_buffer_dirty(c);
8167 * This case can happen in the following situations:
8169 * 1. overwrite of the previous root.
8171 * 2. reinit of the reloc data root: because we skipped pinning
8172 * down the reloc data tree earlier, we can allocate the
8173 * same block bytenr here.
8175 if (old->start == c->start) {
8176 btrfs_set_root_generation(&root->root_item,
8178 root->root_item.level = btrfs_header_level(root->node);
8179 ret = btrfs_update_root(trans, root->fs_info->tree_root,
8180 &root->root_key, &root->root_item);
8182 free_extent_buffer(c);
8186 free_extent_buffer(old);
8188 add_root_to_dirty_list(root);
8192 static int pin_down_tree_blocks(struct btrfs_fs_info *fs_info,
8193 struct extent_buffer *eb, int tree_root)
8195 struct extent_buffer *tmp;
8196 struct btrfs_root_item *ri;
8197 struct btrfs_key key;
8200 int level = btrfs_header_level(eb);
8206 * If we have pinned this block before, don't pin it again.
8207 * This not only avoids an infinite loop on a broken filesystem
8208 * but also gives us some speedup.
8210 if (test_range_bit(&fs_info->pinned_extents, eb->start,
8211 eb->start + eb->len - 1, EXTENT_DIRTY, 0))
8214 btrfs_pin_extent(fs_info, eb->start, eb->len);
8216 leafsize = btrfs_super_leafsize(fs_info->super_copy);
8217 nritems = btrfs_header_nritems(eb);
8218 for (i = 0; i < nritems; i++) {
8220 btrfs_item_key_to_cpu(eb, &key, i);
8221 if (key.type != BTRFS_ROOT_ITEM_KEY)
8223 /* Skip the extent root and reloc roots */
8224 if (key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
8225 key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
8226 key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
8228 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
8229 bytenr = btrfs_disk_root_bytenr(eb, ri);
8232 * If at any point we start needing the real root we
8233 * will have to build a stump root for the root we are
8234 * in, but for now this doesn't actually use the root so
8235 * just pass in extent_root.
8237 tmp = read_tree_block(fs_info->extent_root, bytenr,
8239 if (!extent_buffer_uptodate(tmp)) {
8240 fprintf(stderr, "Error reading root block\n");
8243 ret = pin_down_tree_blocks(fs_info, tmp, 0);
8244 free_extent_buffer(tmp);
8248 bytenr = btrfs_node_blockptr(eb, i);
8250 /* If we aren't the tree root don't read the block */
8251 if (level == 1 && !tree_root) {
8252 btrfs_pin_extent(fs_info, bytenr, leafsize);
8256 tmp = read_tree_block(fs_info->extent_root, bytenr,
8258 if (!extent_buffer_uptodate(tmp)) {
8259 fprintf(stderr, "Error reading tree block\n");
8262 ret = pin_down_tree_blocks(fs_info, tmp, tree_root);
8263 free_extent_buffer(tmp);
8272 static int pin_metadata_blocks(struct btrfs_fs_info *fs_info)
8276 ret = pin_down_tree_blocks(fs_info, fs_info->chunk_root->node, 0);
8280 return pin_down_tree_blocks(fs_info, fs_info->tree_root->node, 1);
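/*
 * pin_metadata_blocks() makes two passes: the chunk tree is pinned first
 * with tree_root == 0 (its leaves are pinned but not read), then the tree
 * root is walked with tree_root == 1 so its leaves are read and every root
 * item in them is followed into the corresponding tree.  Everything pinned
 * here stays out of the allocator while the extent tree is rebuilt.
 */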
8283 static int reset_block_groups(struct btrfs_fs_info *fs_info)
8285 struct btrfs_block_group_cache *cache;
8286 struct btrfs_path *path;
8287 struct extent_buffer *leaf;
8288 struct btrfs_chunk *chunk;
8289 struct btrfs_key key;
8293 path = btrfs_alloc_path();
8298 key.type = BTRFS_CHUNK_ITEM_KEY;
8301 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
8303 btrfs_free_path(path);
8308 * We do this in case the block groups were screwed up and had alloc
8309 * bits that aren't actually set on the chunks. This happens with
8310 * restored images every time and could happen in real life I guess.
8312 fs_info->avail_data_alloc_bits = 0;
8313 fs_info->avail_metadata_alloc_bits = 0;
8314 fs_info->avail_system_alloc_bits = 0;
8316 /* First we need to create the in-memory block groups */
8318 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8319 ret = btrfs_next_leaf(fs_info->chunk_root, path);
8321 btrfs_free_path(path);
8329 leaf = path->nodes[0];
8330 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
8331 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
8336 chunk = btrfs_item_ptr(leaf, path->slots[0],
8337 struct btrfs_chunk);
8338 btrfs_add_block_group(fs_info, 0,
8339 btrfs_chunk_type(leaf, chunk),
8340 key.objectid, key.offset,
8341 btrfs_chunk_length(leaf, chunk));
8342 set_extent_dirty(&fs_info->free_space_cache, key.offset,
8343 key.offset + btrfs_chunk_length(leaf, chunk),
8349 cache = btrfs_lookup_first_block_group(fs_info, start);
8353 start = cache->key.objectid + cache->key.offset;
8356 btrfs_free_path(path);
8360 static int reset_balance(struct btrfs_trans_handle *trans,
8361 struct btrfs_fs_info *fs_info)
8363 struct btrfs_root *root = fs_info->tree_root;
8364 struct btrfs_path *path;
8365 struct extent_buffer *leaf;
8366 struct btrfs_key key;
8367 int del_slot, del_nr = 0;
8371 path = btrfs_alloc_path();
8375 key.objectid = BTRFS_BALANCE_OBJECTID;
8376 key.type = BTRFS_BALANCE_ITEM_KEY;
8379 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8384 goto reinit_data_reloc;
8389 ret = btrfs_del_item(trans, root, path);
8392 btrfs_release_path(path);
8394 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
8395 key.type = BTRFS_ROOT_ITEM_KEY;
8398 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8402 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8407 ret = btrfs_del_items(trans, root, path,
8414 btrfs_release_path(path);
8417 ret = btrfs_search_slot(trans, root, &key, path,
8424 leaf = path->nodes[0];
8425 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
8426 if (key.objectid > BTRFS_TREE_RELOC_OBJECTID)
8428 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8433 del_slot = path->slots[0];
8442 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
8446 btrfs_release_path(path);
8449 key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
8450 key.type = BTRFS_ROOT_ITEM_KEY;
8451 key.offset = (u64)-1;
8452 root = btrfs_read_fs_root(fs_info, &key);
8454 fprintf(stderr, "Error reading data reloc tree\n");
8455 ret = PTR_ERR(root);
8458 record_root_in_trans(trans, root);
8459 ret = btrfs_fsck_reinit_root(trans, root, 0);
8462 ret = btrfs_make_root_dir(trans, root, BTRFS_FIRST_FREE_OBJECTID);
8464 btrfs_free_path(path);
8468 static int reinit_extent_tree(struct btrfs_trans_handle *trans,
8469 struct btrfs_fs_info *fs_info)
8475 * The only reason we don't do this is because right now we're just
8476 * walking the trees we find and pinning down their bytes, we don't look
8477 * at any of the leaves. In order to do mixed groups we'd have to check
8478 * the leaves of any fs roots and pin down the bytes for any file
8479 * extents we find. Not hard but why do it if we don't have to?
8481 if (btrfs_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)) {
8482 fprintf(stderr, "We don't support re-initing the extent tree "
8483 "for mixed block groups yet, please notify a btrfs "
8484 "developer you want to do this so they can add this "
8485 "functionality.\n");
8490 * first we need to walk all of the trees except the extent tree and pin
8491 * down the bytes that are in use so we don't overwrite any existing
8494 ret = pin_metadata_blocks(fs_info);
8496 fprintf(stderr, "error pinning down used bytes\n");
8501 * Need to drop all the block groups since we're going to recreate all
8504 btrfs_free_block_groups(fs_info);
8505 ret = reset_block_groups(fs_info);
8507 fprintf(stderr, "error resetting the block groups\n");
8511 /* Ok we can allocate now, reinit the extent root */
8512 ret = btrfs_fsck_reinit_root(trans, fs_info->extent_root, 0);
8514 fprintf(stderr, "extent root initialization failed\n");
8516 * When the transaction code is updated we should end the
8517 * transaction, but for now progs only knows about commit so
8518 * just return an error.
8524 * Now we have all the in-memory block groups set up so we can make
8525 * allocations properly, and the metadata we care about is safe since we
8526 * pinned all of it above.
8529 struct btrfs_block_group_cache *cache;
8531 cache = btrfs_lookup_first_block_group(fs_info, start);
8534 start = cache->key.objectid + cache->key.offset;
8535 ret = btrfs_insert_item(trans, fs_info->extent_root,
8536 &cache->key, &cache->item,
8537 sizeof(cache->item));
8539 fprintf(stderr, "Error adding block group\n");
8542 btrfs_extent_post_op(trans, fs_info->extent_root);
8545 ret = reset_balance(trans, fs_info);
8547 fprintf(stderr, "error resetting the pending balance\n");
8552 static int recow_extent_buffer(struct btrfs_root *root, struct extent_buffer *eb)
8554 struct btrfs_path *path;
8555 struct btrfs_trans_handle *trans;
8556 struct btrfs_key key;
8559 printf("Recowing metadata block %llu\n", eb->start);
8560 key.objectid = btrfs_header_owner(eb);
8561 key.type = BTRFS_ROOT_ITEM_KEY;
8562 key.offset = (u64)-1;
8564 root = btrfs_read_fs_root(root->fs_info, &key);
8566 fprintf(stderr, "Couldn't find owner root %llu\n",
8568 return PTR_ERR(root);
8571 path = btrfs_alloc_path();
8575 trans = btrfs_start_transaction(root, 1);
8576 if (IS_ERR(trans)) {
8577 btrfs_free_path(path);
8578 return PTR_ERR(trans);
8581 path->lowest_level = btrfs_header_level(eb);
8582 if (path->lowest_level)
8583 btrfs_node_key_to_cpu(eb, &key, 0);
8585 btrfs_item_key_to_cpu(eb, &key, 0);
8587 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
8588 btrfs_commit_transaction(trans, root);
8589 btrfs_free_path(path);
8593 static int delete_bad_item(struct btrfs_root *root, struct bad_item *bad)
8595 struct btrfs_path *path;
8596 struct btrfs_trans_handle *trans;
8597 struct btrfs_key key;
8600 printf("Deleting bad item [%llu,%u,%llu]\n", bad->key.objectid,
8601 bad->key.type, bad->key.offset);
8602 key.objectid = bad->root_id;
8603 key.type = BTRFS_ROOT_ITEM_KEY;
8604 key.offset = (u64)-1;
8606 root = btrfs_read_fs_root(root->fs_info, &key);
8608 fprintf(stderr, "Couldn't find owner root %llu\n",
8610 return PTR_ERR(root);
8613 path = btrfs_alloc_path();
8617 trans = btrfs_start_transaction(root, 1);
8618 if (IS_ERR(trans)) {
8619 btrfs_free_path(path);
8620 return PTR_ERR(trans);
8623 ret = btrfs_search_slot(trans, root, &bad->key, path, -1, 1);
8629 ret = btrfs_del_item(trans, root, path);
8631 btrfs_commit_transaction(trans, root);
8632 btrfs_free_path(path);
8636 static int zero_log_tree(struct btrfs_root *root)
8638 struct btrfs_trans_handle *trans;
8641 trans = btrfs_start_transaction(root, 1);
8642 if (IS_ERR(trans)) {
8643 ret = PTR_ERR(trans);
8646 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
8647 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
8648 ret = btrfs_commit_transaction(trans, root);
8652 static int populate_csum(struct btrfs_trans_handle *trans,
8653 struct btrfs_root *csum_root, char *buf, u64 start,
8660 while (offset < len) {
8661 sectorsize = csum_root->sectorsize;
8662 ret = read_extent_data(csum_root, buf, start + offset,
8666 ret = btrfs_csum_file_block(trans, csum_root, start + len,
8667 start + offset, buf, sectorsize);
8670 offset += sectorsize;
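/*
 * populate_csum() fills csums for one data extent a sector at a time: each
 * sectorsize block is read back from disk with read_extent_data() and then
 * handed to btrfs_csum_file_block(), which checksums the buffer and inserts
 * the result into the csum tree keyed by the block's disk bytenr.
 */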
8675 static int fill_csum_tree_from_one_fs_root(struct btrfs_trans_handle *trans,
8676 struct btrfs_root *csum_root,
8677 struct btrfs_root *cur_root)
8679 struct btrfs_path *path;
8680 struct btrfs_key key;
8681 struct extent_buffer *node;
8682 struct btrfs_file_extent_item *fi;
8689 path = btrfs_alloc_path();
8692 buf = malloc(cur_root->fs_info->csum_root->sectorsize);
8702 ret = btrfs_search_slot(NULL, cur_root, &key, path, 0, 0);
8705 /* Iterate all regular file extents and fill its csum */
8707 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
8709 if (key.type != BTRFS_EXTENT_DATA_KEY)
8711 node = path->nodes[0];
8712 slot = path->slots[0];
8713 fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
8714 if (btrfs_file_extent_type(node, fi) != BTRFS_FILE_EXTENT_REG)
8716 start = btrfs_file_extent_disk_bytenr(node, fi);
8717 len = btrfs_file_extent_disk_num_bytes(node, fi);
8719 ret = populate_csum(trans, csum_root, buf, start, len);
8726 * TODO: if next leaf is corrupted, jump to nearest next valid
8729 ret = btrfs_next_item(cur_root, path);
8739 btrfs_free_path(path);
8744 static int fill_csum_tree_from_fs(struct btrfs_trans_handle *trans,
8745 struct btrfs_root *csum_root)
8747 struct btrfs_fs_info *fs_info = csum_root->fs_info;
8748 struct btrfs_path *path;
8749 struct btrfs_root *tree_root = fs_info->tree_root;
8750 struct btrfs_root *cur_root;
8751 struct extent_buffer *node;
8752 struct btrfs_key key;
8756 path = btrfs_alloc_path();
8760 key.objectid = BTRFS_FS_TREE_OBJECTID;
8762 key.type = BTRFS_ROOT_ITEM_KEY;
8764 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
8773 node = path->nodes[0];
8774 slot = path->slots[0];
8775 btrfs_item_key_to_cpu(node, &key, slot);
8776 if (key.objectid > BTRFS_LAST_FREE_OBJECTID)
8778 if (key.type != BTRFS_ROOT_ITEM_KEY)
8780 if (!is_fstree(key.objectid))
8782 key.offset = (u64)-1;
8784 cur_root = btrfs_read_fs_root(fs_info, &key);
8785 if (IS_ERR(cur_root) || !cur_root) {
8786 fprintf(stderr, "Failed to read fs/subvol tree: %lld\n",
8790 ret = fill_csum_tree_from_one_fs_root(trans, csum_root,
8795 ret = btrfs_next_item(tree_root, path);
8805 btrfs_free_path(path);
8809 static int fill_csum_tree_from_extent(struct btrfs_trans_handle *trans,
8810 struct btrfs_root *csum_root)
8812 struct btrfs_root *extent_root = csum_root->fs_info->extent_root;
8813 struct btrfs_path *path;
8814 struct btrfs_extent_item *ei;
8815 struct extent_buffer *leaf;
8817 struct btrfs_key key;
8820 path = btrfs_alloc_path();
8825 key.type = BTRFS_EXTENT_ITEM_KEY;
8828 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
8830 btrfs_free_path(path);
8834 buf = malloc(csum_root->sectorsize);
8836 btrfs_free_path(path);
8841 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8842 ret = btrfs_next_leaf(extent_root, path);
8850 leaf = path->nodes[0];
8852 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
8853 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
8858 ei = btrfs_item_ptr(leaf, path->slots[0],
8859 struct btrfs_extent_item);
8860 if (!(btrfs_extent_flags(leaf, ei) &
8861 BTRFS_EXTENT_FLAG_DATA)) {
8866 ret = populate_csum(trans, csum_root, buf, key.objectid,
8873 btrfs_free_path(path);
8879 * Recalculate the csum and put it into the csum tree.
8881 * Extent tree init will wipe out all the extent info, so in that case we
8882 * can't depend on the extent tree and must use the fs trees instead.  If
8883 * search_fs_tree is set, we will use the fs/subvol trees to init the csum tree.
8885 static int fill_csum_tree(struct btrfs_trans_handle *trans,
8886 struct btrfs_root *csum_root,
8890 return fill_csum_tree_from_fs(trans, csum_root);
8892 return fill_csum_tree_from_extent(trans, csum_root);
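/*
 * In practice this means --init-csum-tree alone can rebuild csums from the
 * data extents recorded in the extent tree, but when the extent tree is
 * itself being re-created (--init-extent-tree) those records are gone, so
 * the caller is expected to set search_fs_tree and the csums are rebuilt
 * by walking every regular file extent in the fs/subvol trees instead.
 */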
8895 struct root_item_info {
8896 /* level of the root */
8898 /* number of nodes at this level, must be 1 for a root */
8902 struct cache_extent cache_extent;
8905 static struct cache_tree *roots_info_cache = NULL;
8907 static void free_roots_info_cache(void)
8909 if (!roots_info_cache)
8912 while (!cache_tree_empty(roots_info_cache)) {
8913 struct cache_extent *entry;
8914 struct root_item_info *rii;
8916 entry = first_cache_extent(roots_info_cache);
8919 remove_cache_extent(roots_info_cache, entry);
8920 rii = container_of(entry, struct root_item_info, cache_extent);
8924 free(roots_info_cache);
8925 roots_info_cache = NULL;
8928 static int build_roots_info_cache(struct btrfs_fs_info *info)
8931 struct btrfs_key key;
8932 struct extent_buffer *leaf;
8933 struct btrfs_path *path;
8935 if (!roots_info_cache) {
8936 roots_info_cache = malloc(sizeof(*roots_info_cache));
8937 if (!roots_info_cache)
8939 cache_tree_init(roots_info_cache);
8942 path = btrfs_alloc_path();
8947 key.type = BTRFS_EXTENT_ITEM_KEY;
8950 ret = btrfs_search_slot(NULL, info->extent_root, &key, path, 0, 0);
8953 leaf = path->nodes[0];
8956 struct btrfs_key found_key;
8957 struct btrfs_extent_item *ei;
8958 struct btrfs_extent_inline_ref *iref;
8959 int slot = path->slots[0];
8964 struct cache_extent *entry;
8965 struct root_item_info *rii;
8967 if (slot >= btrfs_header_nritems(leaf)) {
8968 ret = btrfs_next_leaf(info->extent_root, path);
8975 leaf = path->nodes[0];
8976 slot = path->slots[0];
8979 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8981 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
8982 found_key.type != BTRFS_METADATA_ITEM_KEY)
8985 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
8986 flags = btrfs_extent_flags(leaf, ei);
8988 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
8989 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
8992 if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
8993 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
8994 level = found_key.offset;
8996 struct btrfs_tree_block_info *info;
8998 info = (struct btrfs_tree_block_info *)(ei + 1);
8999 iref = (struct btrfs_extent_inline_ref *)(info + 1);
9000 level = btrfs_tree_block_level(leaf, info);
9004 * For a root extent, it must be of the following type and be the
9005 * first (and only) iref in the item.
9007 type = btrfs_extent_inline_ref_type(leaf, iref);
9008 if (type != BTRFS_TREE_BLOCK_REF_KEY)
9011 root_id = btrfs_extent_inline_ref_offset(leaf, iref);
9012 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
9014 rii = malloc(sizeof(struct root_item_info));
9019 rii->cache_extent.start = root_id;
9020 rii->cache_extent.size = 1;
9021 rii->level = (u8)-1;
9022 entry = &rii->cache_extent;
9023 ret = insert_cache_extent(roots_info_cache, entry);
9026 rii = container_of(entry, struct root_item_info,
9030 ASSERT(rii->cache_extent.start == root_id);
9031 ASSERT(rii->cache_extent.size == 1);
9033 if (level > rii->level || rii->level == (u8)-1) {
9035 rii->bytenr = found_key.objectid;
9036 rii->gen = btrfs_extent_generation(leaf, ei);
9037 rii->node_count = 1;
9038 } else if (level == rii->level) {
9046 btrfs_free_path(path);
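/*
 * The level/iref extraction above relies on the two tree block extent item
 * layouts: with skinny metadata (METADATA_ITEM_KEY) the block's level is
 * stored in key.offset and the inline refs follow the extent item directly,
 * while a classic EXTENT_ITEM_KEY tree block carries a btrfs_tree_block_info
 * (holding the level) between the extent item and the inline refs.  Only
 * items whose first inline ref is a TREE_BLOCK_REF are candidates for a
 * root node, and the highest level seen per root id wins.
 */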
9051 static int maybe_repair_root_item(struct btrfs_fs_info *info,
9052 struct btrfs_path *path,
9053 const struct btrfs_key *root_key,
9054 const int read_only_mode)
9056 const u64 root_id = root_key->objectid;
9057 struct cache_extent *entry;
9058 struct root_item_info *rii;
9059 struct btrfs_root_item ri;
9060 unsigned long offset;
9062 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
9065 "Error: could not find extent items for root %llu\n",
9066 root_key->objectid);
9070 rii = container_of(entry, struct root_item_info, cache_extent);
9071 ASSERT(rii->cache_extent.start == root_id);
9072 ASSERT(rii->cache_extent.size == 1);
9074 if (rii->node_count != 1) {
9076 "Error: could not find btree root extent for root %llu\n",
9081 offset = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
9082 read_extent_buffer(path->nodes[0], &ri, offset, sizeof(ri));
9084 if (btrfs_root_bytenr(&ri) != rii->bytenr ||
9085 btrfs_root_level(&ri) != rii->level ||
9086 btrfs_root_generation(&ri) != rii->gen) {
9089 * If we're in repair mode but our caller told us to not update
9090 * the root item, i.e. just check if it needs to be updated, don't
9091 * print this message, since the caller will call us again shortly
9092 * for the same root item without read only mode (the caller will
9093 * open a transaction first).
9095 if (!(read_only_mode && repair))
9097 "%sroot item for root %llu,"
9098 " current bytenr %llu, current gen %llu, current level %u,"
9099 " new bytenr %llu, new gen %llu, new level %u\n",
9100 (read_only_mode ? "" : "fixing "),
9102 btrfs_root_bytenr(&ri), btrfs_root_generation(&ri),
9103 btrfs_root_level(&ri),
9104 rii->bytenr, rii->gen, rii->level);
9106 if (btrfs_root_generation(&ri) > rii->gen) {
9108 "root %llu has a root item with a more recent gen (%llu) compared to the found root node (%llu)\n",
9109 root_id, btrfs_root_generation(&ri), rii->gen);
9113 if (!read_only_mode) {
9114 btrfs_set_root_bytenr(&ri, rii->bytenr);
9115 btrfs_set_root_level(&ri, rii->level);
9116 btrfs_set_root_generation(&ri, rii->gen);
9117 write_extent_buffer(path->nodes[0], &ri,
9118 offset, sizeof(ri));
9128 * A regression introduced in the 3.17 kernel (more specifically in 3.17-rc2),
9129 * caused read-only snapshots to be corrupted if they were created at a moment
9130 * when the source subvolume/snapshot had orphan items. The issue was that the
9131 * on-disk root items became incorrect, referring to the pre orphan cleanup root
9132 * node instead of the post orphan cleanup root node.
9133 * So this function, and its callees, just detects and fixes those cases. Even
9134 * though the regression was for read-only snapshots, this function applies to
9135 * any snapshot/subvolume root.
9136 * This must be run before any other repair code; not doing so makes other
9137 * repair code delete or modify backrefs in the extent tree, for example, which
9138 * will result in an inconsistent fs after repairing the root items.
9140 static int repair_root_items(struct btrfs_fs_info *info)
9142 struct btrfs_path *path = NULL;
9143 struct btrfs_key key;
9144 struct extent_buffer *leaf;
9145 struct btrfs_trans_handle *trans = NULL;
9150 ret = build_roots_info_cache(info);
9154 path = btrfs_alloc_path();
9160 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
9161 key.type = BTRFS_ROOT_ITEM_KEY;
9166 * Avoid opening and committing transactions if a leaf doesn't have
9167 * any root items that need to be fixed, so that we avoid rotating
9168 * backup roots unnecessarily.
9171 trans = btrfs_start_transaction(info->tree_root, 1);
9172 if (IS_ERR(trans)) {
9173 ret = PTR_ERR(trans);
9178 ret = btrfs_search_slot(trans, info->tree_root, &key, path,
9182 leaf = path->nodes[0];
9185 struct btrfs_key found_key;
9187 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
9188 int no_more_keys = find_next_key(path, &key);
9190 btrfs_release_path(path);
9192 ret = btrfs_commit_transaction(trans,
9204 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9206 if (found_key.type != BTRFS_ROOT_ITEM_KEY)
9208 if (found_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
9211 ret = maybe_repair_root_item(info, path, &found_key,
9216 if (!trans && repair) {
9219 btrfs_release_path(path);
9229 free_roots_info_cache();
9230 btrfs_free_path(path);
9232 btrfs_commit_transaction(trans, info->tree_root);
9239 const char * const cmd_check_usage[] = {
9240 "btrfs check [options] <device>",
9241 "Check an unmounted btrfs filesystem.",
9243 "-s|--super <superblock> use this superblock copy",
9244 "-b|--backup use the backup root copy",
9245 "--repair try to repair the filesystem",
9246 "--init-csum-tree create a new CRC tree",
9247 "--init-extent-tree create a new extent tree",
9248 "--check-data-csum verify checkums of data blocks",
9249 "--qgroup-report print a report on qgroup consistency",
9250 "--subvol-extents <subvolid> print subvolume extents and sharing state",
9251 "--tree-root <bytenr> use the given bytenr for the tree root",
9255 int cmd_check(int argc, char **argv)
9257 struct cache_tree root_cache;
9258 struct btrfs_root *root;
9259 struct btrfs_fs_info *info;
9262 u64 tree_root_bytenr = 0;
9263 char uuidbuf[BTRFS_UUID_UNPARSED_SIZE];
9266 int init_csum_tree = 0;
9268 int qgroup_report = 0;
9269 enum btrfs_open_ctree_flags ctree_flags = OPEN_CTREE_EXCLUSIVE;
9273 enum { OPT_REPAIR = 257, OPT_INIT_CSUM, OPT_INIT_EXTENT,
9274 OPT_CHECK_CSUM, OPT_READONLY };
9275 static const struct option long_options[] = {
9276 { "super", required_argument, NULL, 's' },
9277 { "repair", no_argument, NULL, OPT_REPAIR },
9278 { "readonly", no_argument, NULL, OPT_READONLY },
9279 { "init-csum-tree", no_argument, NULL, OPT_INIT_CSUM },
9280 { "init-extent-tree", no_argument, NULL, OPT_INIT_EXTENT },
9281 { "check-data-csum", no_argument, NULL, OPT_CHECK_CSUM },
9282 { "backup", no_argument, NULL, 'b' },
9283 { "subvol-extents", required_argument, NULL, 'E' },
9284 { "qgroup-report", no_argument, NULL, 'Q' },
9285 { "tree-root", required_argument, NULL, 'r' },
9289 c = getopt_long(argc, argv, "as:br:", long_options, NULL);
9293 case 'a': /* ignored */ break;
9295 ctree_flags |= OPEN_CTREE_BACKUP_ROOT;
9298 num = arg_strtou64(optarg);
9299 if (num >= BTRFS_SUPER_MIRROR_MAX) {
9301 "ERROR: super mirror should be less than: %d\n",
9302 BTRFS_SUPER_MIRROR_MAX);
9305 bytenr = btrfs_sb_offset(((int)num));
9306 printf("using SB copy %llu, bytenr %llu\n", num,
9307 (unsigned long long)bytenr);
9313 subvolid = arg_strtou64(optarg);
9316 tree_root_bytenr = arg_strtou64(optarg);
9320 usage(cmd_check_usage);
9322 printf("enabling repair mode\n");
9324 ctree_flags |= OPEN_CTREE_WRITES;
9330 printf("Creating a new CRC tree\n");
9333 ctree_flags |= OPEN_CTREE_WRITES;
9335 case OPT_INIT_EXTENT:
9336 init_extent_tree = 1;
9337 ctree_flags |= (OPEN_CTREE_WRITES |
9338 OPEN_CTREE_NO_BLOCK_GROUPS);
9341 case OPT_CHECK_CSUM:
9342 check_data_csum = 1;
9346 argc = argc - optind;
9348 if (check_argc_exact(argc, 1))
9349 usage(cmd_check_usage);
9351 /* This check is the only reason for --readonly to exist */
9352 if (readonly && repair) {
9353 fprintf(stderr, "Repair options are not compatible with --readonly\n");
9358 cache_tree_init(&root_cache);
9360 if((ret = check_mounted(argv[optind])) < 0) {
9361 fprintf(stderr, "Could not check mount status: %s\n", strerror(-ret));
9364 fprintf(stderr, "%s is currently mounted. Aborting.\n", argv[optind]);
9369 /* only allow partial opening under repair mode */
9371 ctree_flags |= OPEN_CTREE_PARTIAL;
9373 info = open_ctree_fs_info(argv[optind], bytenr, tree_root_bytenr,
9376 fprintf(stderr, "Couldn't open file system\n");
9381 root = info->fs_root;
9384 * repair mode will force us to commit the transaction, which
9385 * will make us fail to load the log tree when mounting.
9387 if (repair && btrfs_super_log_root(info->super_copy)) {
9388 ret = ask_user("repair mode will force us to clear out the log tree, are you sure?");
9393 ret = zero_log_tree(root);
9395 fprintf(stderr, "failed to zero log tree\n");
9400 uuid_unparse(info->super_copy->fsid, uuidbuf);
9401 if (qgroup_report) {
9402 printf("Print quota groups for %s\nUUID: %s\n", argv[optind],
9404 ret = qgroup_verify_all(info);
9406 print_qgroup_report(1);
9410 printf("Print extent state for subvolume %llu on %s\nUUID: %s\n",
9411 subvolid, argv[optind], uuidbuf);
9412 ret = print_extent_state(info, subvolid);
9415 printf("Checking filesystem on %s\nUUID: %s\n", argv[optind], uuidbuf);
9417 if (!extent_buffer_uptodate(info->tree_root->node) ||
9418 !extent_buffer_uptodate(info->dev_root->node) ||
9419 !extent_buffer_uptodate(info->chunk_root->node)) {
9420 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
9425 if (init_extent_tree || init_csum_tree) {
9426 struct btrfs_trans_handle *trans;
9428 trans = btrfs_start_transaction(info->extent_root, 0);
9429 if (IS_ERR(trans)) {
9430 fprintf(stderr, "Error starting transaction\n");
9431 ret = PTR_ERR(trans);
9435 if (init_extent_tree) {
9436 printf("Creating a new extent tree\n");
9437 ret = reinit_extent_tree(trans, info);
9442 if (init_csum_tree) {
9443 fprintf(stderr, "Reinit crc root\n");
9444 ret = btrfs_fsck_reinit_root(trans, info->csum_root, 0);
9446 fprintf(stderr, "crc root initialization failed\n");
9451 ret = fill_csum_tree(trans, info->csum_root,
9454 fprintf(stderr, "crc refilling failed\n");
9459 * Ok now we commit and run the normal fsck, which will add
9460 * extent entries for all of the items it finds.
9462 ret = btrfs_commit_transaction(trans, info->extent_root);
9466 if (!extent_buffer_uptodate(info->extent_root->node)) {
9467 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
9471 if (!extent_buffer_uptodate(info->csum_root->node)) {
9472 fprintf(stderr, "Checksum root corrupted, rerun with --init-csum-tree option\n");
9477 fprintf(stderr, "checking extents\n");
9478 ret = check_chunks_and_extents(root);
9480 fprintf(stderr, "Errors found in extent allocation tree or chunk allocation\n");
9482 ret = repair_root_items(info);
9486 fprintf(stderr, "Fixed %d roots.\n", ret);
9488 } else if (ret > 0) {
9490 "Found %d roots with an outdated root item.\n",
9493 "Please run a filesystem check with the option --repair to fix them.\n");
9498 fprintf(stderr, "checking free space cache\n");
9499 ret = check_space_cache(root);
9504 * We used to have to have these hole extents in between our real
9505 * extents, so if this flag isn't set we need to make sure there
9506 * are no gaps in the file extents for inodes; otherwise we can just
9507 * ignore gaps when they happen.
9509 no_holes = btrfs_fs_incompat(root->fs_info,
9510 BTRFS_FEATURE_INCOMPAT_NO_HOLES);
9511 fprintf(stderr, "checking fs roots\n");
9512 ret = check_fs_roots(root, &root_cache);
9516 fprintf(stderr, "checking csums\n");
9517 ret = check_csums(root);
9521 fprintf(stderr, "checking root refs\n");
9522 ret = check_root_refs(root, &root_cache);
9526 while (repair && !list_empty(&root->fs_info->recow_ebs)) {
9527 struct extent_buffer *eb;
9529 eb = list_first_entry(&root->fs_info->recow_ebs,
9530 struct extent_buffer, recow);
9531 list_del_init(&eb->recow);
9532 ret = recow_extent_buffer(root, eb);
9537 while (!list_empty(&delete_items)) {
9538 struct bad_item *bad;
9540 bad = list_first_entry(&delete_items, struct bad_item, list);
9541 list_del_init(&bad->list);
9543 ret = delete_bad_item(root, bad);
9547 if (info->quota_enabled) {
9549 fprintf(stderr, "checking quota groups\n");
9550 err = qgroup_verify_all(info);
9555 if (!list_empty(&root->fs_info->recow_ebs)) {
9556 fprintf(stderr, "Transid errors in file system\n");
9560 print_qgroup_report(0);
9561 if (found_old_backref) { /*
9562 * there was a disk format change when mixed
9563 * backref was in testing tree. The old format
9564 * existed about one week.
9566 printf("\n * Found old mixed backref format. "
9567 "The old format is not supported! *"
9568 "\n * Please mount the FS in readonly mode, "
9569 "backup data and re-format the FS. *\n\n");
9572 printf("found %llu bytes used err is %d\n",
9573 (unsigned long long)bytes_used, ret);
9574 printf("total csum bytes: %llu\n",(unsigned long long)total_csum_bytes);
9575 printf("total tree bytes: %llu\n",
9576 (unsigned long long)total_btree_bytes);
9577 printf("total fs tree bytes: %llu\n",
9578 (unsigned long long)total_fs_tree_bytes);
9579 printf("total extent tree bytes: %llu\n",
9580 (unsigned long long)total_extent_tree_bytes);
9581 printf("btree space waste bytes: %llu\n",
9582 (unsigned long long)btree_space_waste);
9583 printf("file data blocks allocated: %llu\n referenced %llu\n",
9584 (unsigned long long)data_bytes_allocated,
9585 (unsigned long long)data_bytes_referenced);
9586 printf("%s\n", PACKAGE_STRING);
9588 free_root_recs_tree(&root_cache);
9591 btrfs_close_all_devices();