2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
23 #include <sys/types.h>
27 #include <uuid/uuid.h>
32 #include "print-tree.h"
33 #include "transaction.h"
36 #include "free-space-cache.h"
38 #include "qgroup-verify.h"
39 #include "rbtree-utils.h"
43 static u64 bytes_used = 0;
44 static u64 total_csum_bytes = 0;
45 static u64 total_btree_bytes = 0;
46 static u64 total_fs_tree_bytes = 0;
47 static u64 total_extent_tree_bytes = 0;
48 static u64 btree_space_waste = 0;
49 static u64 data_bytes_allocated = 0;
50 static u64 data_bytes_referenced = 0;
51 static int found_old_backref = 0;
52 static LIST_HEAD(duplicate_extents);
53 static LIST_HEAD(delete_items);
54 static int repair = 0;
55 static int no_holes = 0;
56 static int init_extent_tree = 0;
57 static int check_data_csum = 0;
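/*
 * Generic back reference state shared by the data and tree backref records
 * below (both embed it as their 'node' member); the bitfields track what has
 * been seen for this reference so far.
 */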
59 struct extent_backref {
60 struct list_head list;
61 unsigned int is_data:1;
62 unsigned int found_extent_tree:1;
63 unsigned int full_backref:1;
64 unsigned int found_ref:1;
65 unsigned int broken:1;
69 struct extent_backref node;
84 struct extent_backref node;
91 struct extent_record {
92 struct list_head backrefs;
93 struct list_head dups;
94 struct list_head list;
95 struct cache_extent cache;
96 struct btrfs_disk_key parent_key;
101 u64 extent_item_refs;
103 u64 parent_generation;
107 unsigned int found_rec:1;
108 unsigned int content_checked:1;
109 unsigned int owner_ref_checked:1;
110 unsigned int is_root:1;
111 unsigned int metadata:1;
112 unsigned int flag_block_full_backref:1;
115 struct inode_backref {
116 struct list_head list;
117 unsigned int found_dir_item:1;
118 unsigned int found_dir_index:1;
119 unsigned int found_inode_ref:1;
120 unsigned int filetype:8;
122 unsigned int ref_type;
129 struct root_item_record {
130 struct list_head list;
136 struct btrfs_key drop_key;
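/* Error bits for inode/root back references, stored in the backref's ->errors field */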
139 #define REF_ERR_NO_DIR_ITEM (1 << 0)
140 #define REF_ERR_NO_DIR_INDEX (1 << 1)
141 #define REF_ERR_NO_INODE_REF (1 << 2)
142 #define REF_ERR_DUP_DIR_ITEM (1 << 3)
143 #define REF_ERR_DUP_DIR_INDEX (1 << 4)
144 #define REF_ERR_DUP_INODE_REF (1 << 5)
145 #define REF_ERR_INDEX_UNMATCH (1 << 6)
146 #define REF_ERR_FILETYPE_UNMATCH (1 << 7)
147 #define REF_ERR_NAME_TOO_LONG (1 << 8) /* 0x100 */
148 #define REF_ERR_NO_ROOT_REF (1 << 9)
149 #define REF_ERR_NO_ROOT_BACKREF (1 << 10)
150 #define REF_ERR_DUP_ROOT_REF (1 << 11)
151 #define REF_ERR_DUP_ROOT_BACKREF (1 << 12)
153 struct inode_record {
154 struct list_head backrefs;
155 unsigned int checked:1;
156 unsigned int merging:1;
157 unsigned int found_inode_item:1;
158 unsigned int found_dir_item:1;
159 unsigned int found_file_extent:1;
160 unsigned int found_csum_item:1;
161 unsigned int some_csum_missing:1;
162 unsigned int nodatasum:1;
175 u64 first_extent_gap;
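/* Error bits for an inode, stored in inode_record->errors */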
180 #define I_ERR_NO_INODE_ITEM (1 << 0)
181 #define I_ERR_NO_ORPHAN_ITEM (1 << 1)
182 #define I_ERR_DUP_INODE_ITEM (1 << 2)
183 #define I_ERR_DUP_DIR_INDEX (1 << 3)
184 #define I_ERR_ODD_DIR_ITEM (1 << 4)
185 #define I_ERR_ODD_FILE_EXTENT (1 << 5)
186 #define I_ERR_BAD_FILE_EXTENT (1 << 6)
187 #define I_ERR_FILE_EXTENT_OVERLAP (1 << 7)
188 #define I_ERR_FILE_EXTENT_DISCOUNT (1 << 8) /* 0x100 */
189 #define I_ERR_DIR_ISIZE_WRONG (1 << 9)
190 #define I_ERR_FILE_NBYTES_WRONG (1 << 10) /* 0x400 */
191 #define I_ERR_ODD_CSUM_ITEM (1 << 11)
192 #define I_ERR_SOME_CSUM_MISSING (1 << 12)
193 #define I_ERR_LINK_COUNT_WRONG (1 << 13)
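/*
 * Example (hypothetical root/inode numbers): print_inode_error() reports the
 * mask in hex, so "root 5 inode 257 errors 400" decodes to
 * I_ERR_FILE_NBYTES_WRONG (1 << 10 == 0x400), and "errors 500" would
 * additionally include I_ERR_FILE_EXTENT_DISCOUNT (1 << 8 == 0x100).
 */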
195 struct root_backref {
196 struct list_head list;
197 unsigned int found_dir_item:1;
198 unsigned int found_dir_index:1;
199 unsigned int found_back_ref:1;
200 unsigned int found_forward_ref:1;
201 unsigned int reachable:1;
211 struct list_head backrefs;
212 struct cache_extent cache;
213 unsigned int found_root_item:1;
219 struct cache_extent cache;
224 struct cache_extent cache;
225 struct cache_tree root_cache;
226 struct cache_tree inode_cache;
227 struct inode_record *current;
236 struct walk_control {
237 struct cache_tree shared;
238 struct shared_node *nodes[BTRFS_MAX_LEVEL];
244 struct btrfs_key key;
246 struct list_head list;
249 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info);
251 static void record_root_in_trans(struct btrfs_trans_handle *trans,
252 struct btrfs_root *root)
254 if (root->last_trans != trans->transid) {
255 root->track_dirty = 1;
256 root->last_trans = trans->transid;
257 root->commit_root = root->node;
258 extent_buffer_get(root->node);
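/* Map an inode mode (S_IF*) to the matching BTRFS_FT_* directory entry type */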
262 static u8 imode_to_type(u32 imode)
265 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
266 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
267 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
268 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
269 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
270 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
271 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
272 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
275 return btrfs_type_by_mode[(imode & S_IFMT) >> S_SHIFT];
279 static int device_record_compare(struct rb_node *node1, struct rb_node *node2)
281 struct device_record *rec1;
282 struct device_record *rec2;
284 rec1 = rb_entry(node1, struct device_record, node);
285 rec2 = rb_entry(node2, struct device_record, node);
286 if (rec1->devid > rec2->devid)
288 else if (rec1->devid < rec2->devid)
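/*
 * Deep-copy an inode record, duplicating its backref list as well, so a
 * record shared between nodes can be modified independently.
 */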
294 static struct inode_record *clone_inode_rec(struct inode_record *orig_rec)
296 struct inode_record *rec;
297 struct inode_backref *backref;
298 struct inode_backref *orig;
301 rec = malloc(sizeof(*rec));
302 memcpy(rec, orig_rec, sizeof(*rec));
304 INIT_LIST_HEAD(&rec->backrefs);
306 list_for_each_entry(orig, &orig_rec->backrefs, list) {
307 size = sizeof(*orig) + orig->namelen + 1;
308 backref = malloc(size);
309 memcpy(backref, orig, size);
310 list_add_tail(&backref->list, &rec->backrefs);
315 static void print_inode_error(struct btrfs_root *root, struct inode_record *rec)
317 u64 root_objectid = root->root_key.objectid;
318 int errors = rec->errors;
322 /* for reloc root errors, print the corresponding fs root objectid */
323 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
324 root_objectid = root->root_key.offset;
325 fprintf(stderr, "reloc");
327 fprintf(stderr, "root %llu inode %llu errors %x",
328 (unsigned long long) root_objectid,
329 (unsigned long long) rec->ino, rec->errors);
331 if (errors & I_ERR_NO_INODE_ITEM)
332 fprintf(stderr, ", no inode item");
333 if (errors & I_ERR_NO_ORPHAN_ITEM)
334 fprintf(stderr, ", no orphan item");
335 if (errors & I_ERR_DUP_INODE_ITEM)
336 fprintf(stderr, ", dup inode item");
337 if (errors & I_ERR_DUP_DIR_INDEX)
338 fprintf(stderr, ", dup dir index");
339 if (errors & I_ERR_ODD_DIR_ITEM)
340 fprintf(stderr, ", odd dir item");
341 if (errors & I_ERR_ODD_FILE_EXTENT)
342 fprintf(stderr, ", odd file extent");
343 if (errors & I_ERR_BAD_FILE_EXTENT)
344 fprintf(stderr, ", bad file extent");
345 if (errors & I_ERR_FILE_EXTENT_OVERLAP)
346 fprintf(stderr, ", file extent overlap");
347 if (errors & I_ERR_FILE_EXTENT_DISCOUNT)
348 fprintf(stderr, ", file extent discount");
349 if (errors & I_ERR_DIR_ISIZE_WRONG)
350 fprintf(stderr, ", dir isize wrong");
351 if (errors & I_ERR_FILE_NBYTES_WRONG)
352 fprintf(stderr, ", nbytes wrong");
353 if (errors & I_ERR_ODD_CSUM_ITEM)
354 fprintf(stderr, ", odd csum item");
355 if (errors & I_ERR_SOME_CSUM_MISSING)
356 fprintf(stderr, ", some csum missing");
357 if (errors & I_ERR_LINK_COUNT_WRONG)
358 fprintf(stderr, ", link count wrong");
359 fprintf(stderr, "\n");
362 static void print_ref_error(int errors)
364 if (errors & REF_ERR_NO_DIR_ITEM)
365 fprintf(stderr, ", no dir item");
366 if (errors & REF_ERR_NO_DIR_INDEX)
367 fprintf(stderr, ", no dir index");
368 if (errors & REF_ERR_NO_INODE_REF)
369 fprintf(stderr, ", no inode ref");
370 if (errors & REF_ERR_DUP_DIR_ITEM)
371 fprintf(stderr, ", dup dir item");
372 if (errors & REF_ERR_DUP_DIR_INDEX)
373 fprintf(stderr, ", dup dir index");
374 if (errors & REF_ERR_DUP_INODE_REF)
375 fprintf(stderr, ", dup inode ref");
376 if (errors & REF_ERR_INDEX_UNMATCH)
377 fprintf(stderr, ", index unmatch");
378 if (errors & REF_ERR_FILETYPE_UNMATCH)
379 fprintf(stderr, ", filetype unmatch");
380 if (errors & REF_ERR_NAME_TOO_LONG)
381 fprintf(stderr, ", name too long");
382 if (errors & REF_ERR_NO_ROOT_REF)
383 fprintf(stderr, ", no root ref");
384 if (errors & REF_ERR_NO_ROOT_BACKREF)
385 fprintf(stderr, ", no root backref");
386 if (errors & REF_ERR_DUP_ROOT_REF)
387 fprintf(stderr, ", dup root ref");
388 if (errors & REF_ERR_DUP_ROOT_BACKREF)
389 fprintf(stderr, ", dup root backref");
390 fprintf(stderr, "\n");
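/*
 * Find the inode record for 'ino' in the cache, allocating a new empty
 * record if none exists.  When 'mod' is set and the record is shared
 * (refs > 1), it is cloned first so the caller can modify it safely.
 */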
393 static struct inode_record *get_inode_rec(struct cache_tree *inode_cache,
396 struct ptr_node *node;
397 struct cache_extent *cache;
398 struct inode_record *rec = NULL;
401 cache = lookup_cache_extent(inode_cache, ino, 1);
403 node = container_of(cache, struct ptr_node, cache);
405 if (mod && rec->refs > 1) {
406 node->data = clone_inode_rec(rec);
411 rec = calloc(1, sizeof(*rec));
413 rec->extent_start = (u64)-1;
414 rec->first_extent_gap = (u64)-1;
416 INIT_LIST_HEAD(&rec->backrefs);
418 node = malloc(sizeof(*node));
419 node->cache.start = ino;
420 node->cache.size = 1;
423 if (ino == BTRFS_FREE_INO_OBJECTID)
426 ret = insert_cache_extent(inode_cache, &node->cache);
432 static void free_inode_rec(struct inode_record *rec)
434 struct inode_backref *backref;
439 while (!list_empty(&rec->backrefs)) {
440 backref = list_entry(rec->backrefs.next,
441 struct inode_backref, list);
442 list_del(&backref->list);
448 static int can_free_inode_rec(struct inode_record *rec)
450 if (!rec->errors && rec->checked && rec->found_inode_item &&
451 rec->nlink == rec->found_link && list_empty(&rec->backrefs))
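/*
 * Run the final consistency checks on an inode record (dir isize, nbytes,
 * odd items, csum coverage, extent holes) once it is fully read, and free
 * it if everything expected was found and nothing is in error.
 */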
456 static void maybe_free_inode_rec(struct cache_tree *inode_cache,
457 struct inode_record *rec)
459 struct cache_extent *cache;
460 struct inode_backref *tmp, *backref;
461 struct ptr_node *node;
462 unsigned char filetype;
464 if (!rec->found_inode_item)
467 filetype = imode_to_type(rec->imode);
468 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
469 if (backref->found_dir_item && backref->found_dir_index) {
470 if (backref->filetype != filetype)
471 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
472 if (!backref->errors && backref->found_inode_ref) {
473 list_del(&backref->list);
479 if (!rec->checked || rec->merging)
482 if (S_ISDIR(rec->imode)) {
483 if (rec->found_size != rec->isize)
484 rec->errors |= I_ERR_DIR_ISIZE_WRONG;
485 if (rec->found_file_extent)
486 rec->errors |= I_ERR_ODD_FILE_EXTENT;
487 } else if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
488 if (rec->found_dir_item)
489 rec->errors |= I_ERR_ODD_DIR_ITEM;
490 if (rec->found_size != rec->nbytes)
491 rec->errors |= I_ERR_FILE_NBYTES_WRONG;
492 if (rec->extent_start == (u64)-1 || rec->extent_start > 0)
493 rec->first_extent_gap = 0;
494 if (rec->nlink > 0 && !no_holes &&
495 (rec->extent_end < rec->isize ||
496 rec->first_extent_gap < rec->isize))
497 rec->errors |= I_ERR_FILE_EXTENT_DISCOUNT;
500 if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
501 if (rec->found_csum_item && rec->nodatasum)
502 rec->errors |= I_ERR_ODD_CSUM_ITEM;
503 if (rec->some_csum_missing && !rec->nodatasum)
504 rec->errors |= I_ERR_SOME_CSUM_MISSING;
507 BUG_ON(rec->refs != 1);
508 if (can_free_inode_rec(rec)) {
509 cache = lookup_cache_extent(inode_cache, rec->ino, 1);
510 node = container_of(cache, struct ptr_node, cache);
511 BUG_ON(node->data != rec);
512 remove_cache_extent(inode_cache, &node->cache);
518 static int check_orphan_item(struct btrfs_root *root, u64 ino)
520 struct btrfs_path path;
521 struct btrfs_key key;
524 key.objectid = BTRFS_ORPHAN_OBJECTID;
525 key.type = BTRFS_ORPHAN_ITEM_KEY;
528 btrfs_init_path(&path);
529 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
530 btrfs_release_path(&path);
536 static int process_inode_item(struct extent_buffer *eb,
537 int slot, struct btrfs_key *key,
538 struct shared_node *active_node)
540 struct inode_record *rec;
541 struct btrfs_inode_item *item;
543 rec = active_node->current;
544 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
545 if (rec->found_inode_item) {
546 rec->errors |= I_ERR_DUP_INODE_ITEM;
549 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
550 rec->nlink = btrfs_inode_nlink(eb, item);
551 rec->isize = btrfs_inode_size(eb, item);
552 rec->nbytes = btrfs_inode_nbytes(eb, item);
553 rec->imode = btrfs_inode_mode(eb, item);
554 if (btrfs_inode_flags(eb, item) & BTRFS_INODE_NODATASUM)
556 rec->found_inode_item = 1;
558 rec->errors |= I_ERR_NO_ORPHAN_ITEM;
559 maybe_free_inode_rec(&active_node->inode_cache, rec);
563 static struct inode_backref *get_inode_backref(struct inode_record *rec,
565 int namelen, u64 dir)
567 struct inode_backref *backref;
569 list_for_each_entry(backref, &rec->backrefs, list) {
570 if (rec->ino == BTRFS_MULTIPLE_OBJECTIDS)
572 if (backref->dir != dir || backref->namelen != namelen)
574 if (memcmp(name, backref->name, namelen))
579 backref = malloc(sizeof(*backref) + namelen + 1);
580 memset(backref, 0, sizeof(*backref));
582 backref->namelen = namelen;
583 memcpy(backref->name, name, namelen);
584 backref->name[namelen] = '\0';
585 list_add_tail(&backref->list, &rec->backrefs);
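/*
 * Record one sighting of a name for 'ino' (a dir_item, dir_index, inode_ref
 * or inode_extref) on the matching backref, flagging duplicates and
 * index/filetype mismatches as it goes.
 */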
589 static int add_inode_backref(struct cache_tree *inode_cache,
590 u64 ino, u64 dir, u64 index,
591 const char *name, int namelen,
592 int filetype, int itemtype, int errors)
594 struct inode_record *rec;
595 struct inode_backref *backref;
597 rec = get_inode_rec(inode_cache, ino, 1);
598 backref = get_inode_backref(rec, name, namelen, dir);
600 backref->errors |= errors;
601 if (itemtype == BTRFS_DIR_INDEX_KEY) {
602 if (backref->found_dir_index)
603 backref->errors |= REF_ERR_DUP_DIR_INDEX;
604 if (backref->found_inode_ref && backref->index != index)
605 backref->errors |= REF_ERR_INDEX_UNMATCH;
606 if (backref->found_dir_item && backref->filetype != filetype)
607 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
609 backref->index = index;
610 backref->filetype = filetype;
611 backref->found_dir_index = 1;
612 } else if (itemtype == BTRFS_DIR_ITEM_KEY) {
614 if (backref->found_dir_item)
615 backref->errors |= REF_ERR_DUP_DIR_ITEM;
616 if (backref->found_dir_index && backref->filetype != filetype)
617 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
619 backref->filetype = filetype;
620 backref->found_dir_item = 1;
621 } else if ((itemtype == BTRFS_INODE_REF_KEY) ||
622 (itemtype == BTRFS_INODE_EXTREF_KEY)) {
623 if (backref->found_inode_ref)
624 backref->errors |= REF_ERR_DUP_INODE_REF;
625 if (backref->found_dir_index && backref->index != index)
626 backref->errors |= REF_ERR_INDEX_UNMATCH;
628 backref->index = index;
630 backref->ref_type = itemtype;
631 backref->found_inode_ref = 1;
636 maybe_free_inode_rec(inode_cache, rec);
640 static int merge_inode_recs(struct inode_record *src, struct inode_record *dst,
641 struct cache_tree *dst_cache)
643 struct inode_backref *backref;
647 list_for_each_entry(backref, &src->backrefs, list) {
648 if (backref->found_dir_index) {
649 add_inode_backref(dst_cache, dst->ino, backref->dir,
650 backref->index, backref->name,
651 backref->namelen, backref->filetype,
652 BTRFS_DIR_INDEX_KEY, backref->errors);
654 if (backref->found_dir_item) {
656 add_inode_backref(dst_cache, dst->ino,
657 backref->dir, 0, backref->name,
658 backref->namelen, backref->filetype,
659 BTRFS_DIR_ITEM_KEY, backref->errors);
661 if (backref->found_inode_ref) {
662 add_inode_backref(dst_cache, dst->ino,
663 backref->dir, backref->index,
664 backref->name, backref->namelen, 0,
665 backref->ref_type, backref->errors);
669 if (src->found_dir_item)
670 dst->found_dir_item = 1;
671 if (src->found_file_extent)
672 dst->found_file_extent = 1;
673 if (src->found_csum_item)
674 dst->found_csum_item = 1;
675 if (src->some_csum_missing)
676 dst->some_csum_missing = 1;
677 if (dst->first_extent_gap > src->first_extent_gap)
678 dst->first_extent_gap = src->first_extent_gap;
680 BUG_ON(src->found_link < dir_count);
681 dst->found_link += src->found_link - dir_count;
682 dst->found_size += src->found_size;
683 if (src->extent_start != (u64)-1) {
684 if (dst->extent_start == (u64)-1) {
685 dst->extent_start = src->extent_start;
686 dst->extent_end = src->extent_end;
688 if (dst->extent_end > src->extent_start)
689 dst->errors |= I_ERR_FILE_EXTENT_OVERLAP;
690 else if (dst->extent_end < src->extent_start &&
691 dst->extent_end < dst->first_extent_gap)
692 dst->first_extent_gap = dst->extent_end;
693 if (dst->extent_end < src->extent_end)
694 dst->extent_end = src->extent_end;
698 dst->errors |= src->errors;
699 if (src->found_inode_item) {
700 if (!dst->found_inode_item) {
701 dst->nlink = src->nlink;
702 dst->isize = src->isize;
703 dst->nbytes = src->nbytes;
704 dst->imode = src->imode;
705 dst->nodatasum = src->nodatasum;
706 dst->found_inode_item = 1;
708 dst->errors |= I_ERR_DUP_INODE_ITEM;
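/*
 * Move every root/inode record from 'src_node' into 'dst_node', merging a
 * record into the existing one if the destination already tracks that inode.
 */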
716 static int splice_shared_node(struct shared_node *src_node,
717 struct shared_node *dst_node)
719 struct cache_extent *cache;
720 struct ptr_node *node, *ins;
721 struct cache_tree *src, *dst;
722 struct inode_record *rec, *conflict;
727 if (--src_node->refs == 0)
729 if (src_node->current)
730 current_ino = src_node->current->ino;
732 src = &src_node->root_cache;
733 dst = &dst_node->root_cache;
735 cache = search_cache_extent(src, 0);
737 node = container_of(cache, struct ptr_node, cache);
739 cache = next_cache_extent(cache);
742 remove_cache_extent(src, &node->cache);
745 ins = malloc(sizeof(*ins));
746 ins->cache.start = node->cache.start;
747 ins->cache.size = node->cache.size;
751 ret = insert_cache_extent(dst, &ins->cache);
752 if (ret == -EEXIST) {
753 conflict = get_inode_rec(dst, rec->ino, 1);
754 merge_inode_recs(rec, conflict, dst);
756 conflict->checked = 1;
757 if (dst_node->current == conflict)
758 dst_node->current = NULL;
760 maybe_free_inode_rec(dst, conflict);
768 if (src == &src_node->root_cache) {
769 src = &src_node->inode_cache;
770 dst = &dst_node->inode_cache;
774 if (current_ino > 0 && (!dst_node->current ||
775 current_ino > dst_node->current->ino)) {
776 if (dst_node->current) {
777 dst_node->current->checked = 1;
778 maybe_free_inode_rec(dst, dst_node->current);
780 dst_node->current = get_inode_rec(dst, current_ino, 1);
785 static void free_inode_ptr(struct cache_extent *cache)
787 struct ptr_node *node;
788 struct inode_record *rec;
790 node = container_of(cache, struct ptr_node, cache);
796 FREE_EXTENT_CACHE_BASED_TREE(inode_recs, free_inode_ptr);
798 static struct shared_node *find_shared_node(struct cache_tree *shared,
801 struct cache_extent *cache;
802 struct shared_node *node;
804 cache = lookup_cache_extent(shared, bytenr, 1);
806 node = container_of(cache, struct shared_node, cache);
812 static int add_shared_node(struct cache_tree *shared, u64 bytenr, u32 refs)
815 struct shared_node *node;
817 node = calloc(1, sizeof(*node));
818 node->cache.start = bytenr;
819 node->cache.size = 1;
820 cache_tree_init(&node->root_cache);
821 cache_tree_init(&node->inode_cache);
824 ret = insert_cache_extent(shared, &node->cache);
829 static int enter_shared_node(struct btrfs_root *root, u64 bytenr, u32 refs,
830 struct walk_control *wc, int level)
832 struct shared_node *node;
833 struct shared_node *dest;
835 if (level == wc->active_node)
838 BUG_ON(wc->active_node <= level);
839 node = find_shared_node(&wc->shared, bytenr);
841 add_shared_node(&wc->shared, bytenr, refs);
842 node = find_shared_node(&wc->shared, bytenr);
843 wc->nodes[level] = node;
844 wc->active_node = level;
848 if (wc->root_level == wc->active_node &&
849 btrfs_root_refs(&root->root_item) == 0) {
850 if (--node->refs == 0) {
851 free_inode_recs_tree(&node->root_cache);
852 free_inode_recs_tree(&node->inode_cache);
853 remove_cache_extent(&wc->shared, &node->cache);
859 dest = wc->nodes[wc->active_node];
860 splice_shared_node(node, dest);
861 if (node->refs == 0) {
862 remove_cache_extent(&wc->shared, &node->cache);
868 static int leave_shared_node(struct btrfs_root *root,
869 struct walk_control *wc, int level)
871 struct shared_node *node;
872 struct shared_node *dest;
875 if (level == wc->root_level)
878 for (i = level + 1; i < BTRFS_MAX_LEVEL; i++) {
882 BUG_ON(i >= BTRFS_MAX_LEVEL);
884 node = wc->nodes[wc->active_node];
885 wc->nodes[wc->active_node] = NULL;
888 dest = wc->nodes[wc->active_node];
889 if (wc->active_node < wc->root_level ||
890 btrfs_root_refs(&root->root_item) > 0) {
891 BUG_ON(node->refs <= 1);
892 splice_shared_node(node, dest);
894 BUG_ON(node->refs < 2);
903 * 1 - if the root with id child_root_id is a child of root parent_root_id
904 * 0 - if the root child_root_id isn't a child of the root parent_root_id but
905 * has other root(s) as parent(s)
906 * 2 - if the root child_root_id doesn't have any parent roots
908 static int is_child_root(struct btrfs_root *root, u64 parent_root_id,
911 struct btrfs_path path;
912 struct btrfs_key key;
913 struct extent_buffer *leaf;
917 btrfs_init_path(&path);
919 key.objectid = parent_root_id;
920 key.type = BTRFS_ROOT_REF_KEY;
921 key.offset = child_root_id;
922 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
926 btrfs_release_path(&path);
930 key.objectid = child_root_id;
931 key.type = BTRFS_ROOT_BACKREF_KEY;
933 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
939 leaf = path.nodes[0];
940 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
941 ret = btrfs_next_leaf(root->fs_info->tree_root, &path);
944 leaf = path.nodes[0];
947 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
948 if (key.objectid != child_root_id ||
949 key.type != BTRFS_ROOT_BACKREF_KEY)
954 if (key.offset == parent_root_id) {
955 btrfs_release_path(&path);
962 btrfs_release_path(&path);
965 return has_parent ? 0 : 2;
968 static int process_dir_item(struct btrfs_root *root,
969 struct extent_buffer *eb,
970 int slot, struct btrfs_key *key,
971 struct shared_node *active_node)
981 struct btrfs_dir_item *di;
982 struct inode_record *rec;
983 struct cache_tree *root_cache;
984 struct cache_tree *inode_cache;
985 struct btrfs_key location;
986 char namebuf[BTRFS_NAME_LEN];
988 root_cache = &active_node->root_cache;
989 inode_cache = &active_node->inode_cache;
990 rec = active_node->current;
991 rec->found_dir_item = 1;
993 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
994 total = btrfs_item_size_nr(eb, slot);
995 while (cur < total) {
997 btrfs_dir_item_key_to_cpu(eb, di, &location);
998 name_len = btrfs_dir_name_len(eb, di);
999 data_len = btrfs_dir_data_len(eb, di);
1000 filetype = btrfs_dir_type(eb, di);
1002 rec->found_size += name_len;
1003 if (name_len <= BTRFS_NAME_LEN) {
1007 len = BTRFS_NAME_LEN;
1008 error = REF_ERR_NAME_TOO_LONG;
1010 read_extent_buffer(eb, namebuf, (unsigned long)(di + 1), len);
1012 if (location.type == BTRFS_INODE_ITEM_KEY) {
1013 add_inode_backref(inode_cache, location.objectid,
1014 key->objectid, key->offset, namebuf,
1015 len, filetype, key->type, error);
1016 } else if (location.type == BTRFS_ROOT_ITEM_KEY) {
1017 add_inode_backref(root_cache, location.objectid,
1018 key->objectid, key->offset,
1019 namebuf, len, filetype,
1022 fprintf(stderr, "invalid location in dir item %u\n",
1024 add_inode_backref(inode_cache, BTRFS_MULTIPLE_OBJECTIDS,
1025 key->objectid, key->offset, namebuf,
1026 len, filetype, key->type, error);
1029 len = sizeof(*di) + name_len + data_len;
1030 di = (struct btrfs_dir_item *)((char *)di + len);
1033 if (key->type == BTRFS_DIR_INDEX_KEY && nritems > 1)
1034 rec->errors |= I_ERR_DUP_DIR_INDEX;
1039 static int process_inode_ref(struct extent_buffer *eb,
1040 int slot, struct btrfs_key *key,
1041 struct shared_node *active_node)
1049 struct cache_tree *inode_cache;
1050 struct btrfs_inode_ref *ref;
1051 char namebuf[BTRFS_NAME_LEN];
1053 inode_cache = &active_node->inode_cache;
1055 ref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1056 total = btrfs_item_size_nr(eb, slot);
1057 while (cur < total) {
1058 name_len = btrfs_inode_ref_name_len(eb, ref);
1059 index = btrfs_inode_ref_index(eb, ref);
1060 if (name_len <= BTRFS_NAME_LEN) {
1064 len = BTRFS_NAME_LEN;
1065 error = REF_ERR_NAME_TOO_LONG;
1067 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
1068 add_inode_backref(inode_cache, key->objectid, key->offset,
1069 index, namebuf, len, 0, key->type, error);
1071 len = sizeof(*ref) + name_len;
1072 ref = (struct btrfs_inode_ref *)((char *)ref + len);
1078 static int process_inode_extref(struct extent_buffer *eb,
1079 int slot, struct btrfs_key *key,
1080 struct shared_node *active_node)
1089 struct cache_tree *inode_cache;
1090 struct btrfs_inode_extref *extref;
1091 char namebuf[BTRFS_NAME_LEN];
1093 inode_cache = &active_node->inode_cache;
1095 extref = btrfs_item_ptr(eb, slot, struct btrfs_inode_extref);
1096 total = btrfs_item_size_nr(eb, slot);
1097 while (cur < total) {
1098 name_len = btrfs_inode_extref_name_len(eb, extref);
1099 index = btrfs_inode_extref_index(eb, extref);
1100 parent = btrfs_inode_extref_parent(eb, extref);
1101 if (name_len <= BTRFS_NAME_LEN) {
1105 len = BTRFS_NAME_LEN;
1106 error = REF_ERR_NAME_TOO_LONG;
1108 read_extent_buffer(eb, namebuf,
1109 (unsigned long)(extref + 1), len);
1110 add_inode_backref(inode_cache, key->objectid, parent,
1111 index, namebuf, len, 0, key->type, error);
1113 len = sizeof(*extref) + name_len;
1114 extref = (struct btrfs_inode_extref *)((char *)extref + len);
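/*
 * Count how many bytes of the range [start, start + len) are covered by
 * checksum items in the csum tree; the number of covered bytes is returned
 * via 'found'.
 */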
1121 static int count_csum_range(struct btrfs_root *root, u64 start,
1122 u64 len, u64 *found)
1124 struct btrfs_key key;
1125 struct btrfs_path path;
1126 struct extent_buffer *leaf;
1131 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
1133 btrfs_init_path(&path);
1135 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1137 key.type = BTRFS_EXTENT_CSUM_KEY;
1139 ret = btrfs_search_slot(NULL, root->fs_info->csum_root,
1143 if (ret > 0 && path.slots[0] > 0) {
1144 leaf = path.nodes[0];
1145 btrfs_item_key_to_cpu(leaf, &key, path.slots[0] - 1);
1146 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
1147 key.type == BTRFS_EXTENT_CSUM_KEY)
1152 leaf = path.nodes[0];
1153 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1154 ret = btrfs_next_leaf(root->fs_info->csum_root, &path);
1159 leaf = path.nodes[0];
1162 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1163 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
1164 key.type != BTRFS_EXTENT_CSUM_KEY)
1167 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1168 if (key.offset >= start + len)
1171 if (key.offset > start)
1174 size = btrfs_item_size_nr(leaf, path.slots[0]);
1175 csum_end = key.offset + (size / csum_size) * root->sectorsize;
1176 if (csum_end > start) {
1177 size = min(csum_end - start, len);
1186 btrfs_release_path(&path);
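/*
 * Record a file extent item into the current inode record: track the covered
 * range (detecting overlaps and gaps), validate the extent fields, and check
 * csum coverage for regular/prealloc extents.
 */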
1192 static int process_file_extent(struct btrfs_root *root,
1193 struct extent_buffer *eb,
1194 int slot, struct btrfs_key *key,
1195 struct shared_node *active_node)
1197 struct inode_record *rec;
1198 struct btrfs_file_extent_item *fi;
1200 u64 disk_bytenr = 0;
1201 u64 extent_offset = 0;
1202 u64 mask = root->sectorsize - 1;
1206 rec = active_node->current;
1207 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
1208 rec->found_file_extent = 1;
1210 if (rec->extent_start == (u64)-1) {
1211 rec->extent_start = key->offset;
1212 rec->extent_end = key->offset;
1215 if (rec->extent_end > key->offset)
1216 rec->errors |= I_ERR_FILE_EXTENT_OVERLAP;
1217 else if (rec->extent_end < key->offset &&
1218 rec->extent_end < rec->first_extent_gap)
1219 rec->first_extent_gap = rec->extent_end;
1221 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
1222 extent_type = btrfs_file_extent_type(eb, fi);
1224 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1225 num_bytes = btrfs_file_extent_inline_len(eb, slot, fi);
1227 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1228 rec->found_size += num_bytes;
1229 num_bytes = (num_bytes + mask) & ~mask;
1230 } else if (extent_type == BTRFS_FILE_EXTENT_REG ||
1231 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1232 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1233 disk_bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1234 extent_offset = btrfs_file_extent_offset(eb, fi);
1235 if (num_bytes == 0 || (num_bytes & mask))
1236 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1237 if (num_bytes + extent_offset >
1238 btrfs_file_extent_ram_bytes(eb, fi))
1239 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1240 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC &&
1241 (btrfs_file_extent_compression(eb, fi) ||
1242 btrfs_file_extent_encryption(eb, fi) ||
1243 btrfs_file_extent_other_encoding(eb, fi)))
1244 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1245 if (disk_bytenr > 0)
1246 rec->found_size += num_bytes;
1248 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1250 rec->extent_end = key->offset + num_bytes;
1252 if (disk_bytenr > 0) {
1254 if (btrfs_file_extent_compression(eb, fi))
1255 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1257 disk_bytenr += extent_offset;
1259 ret = count_csum_range(root, disk_bytenr, num_bytes, &found);
1262 if (extent_type == BTRFS_FILE_EXTENT_REG) {
1264 rec->found_csum_item = 1;
1265 if (found < num_bytes)
1266 rec->some_csum_missing = 1;
1267 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1269 rec->errors |= I_ERR_ODD_CSUM_ITEM;
1275 static int process_one_leaf(struct btrfs_root *root, struct extent_buffer *eb,
1276 struct walk_control *wc)
1278 struct btrfs_key key;
1282 struct cache_tree *inode_cache;
1283 struct shared_node *active_node;
1285 if (wc->root_level == wc->active_node &&
1286 btrfs_root_refs(&root->root_item) == 0)
1289 active_node = wc->nodes[wc->active_node];
1290 inode_cache = &active_node->inode_cache;
1291 nritems = btrfs_header_nritems(eb);
1292 for (i = 0; i < nritems; i++) {
1293 btrfs_item_key_to_cpu(eb, &key, i);
1295 if (key.objectid == BTRFS_FREE_SPACE_OBJECTID)
1297 if (key.type == BTRFS_ORPHAN_ITEM_KEY)
1300 if (active_node->current == NULL ||
1301 active_node->current->ino < key.objectid) {
1302 if (active_node->current) {
1303 active_node->current->checked = 1;
1304 maybe_free_inode_rec(inode_cache,
1305 active_node->current);
1307 active_node->current = get_inode_rec(inode_cache,
1311 case BTRFS_DIR_ITEM_KEY:
1312 case BTRFS_DIR_INDEX_KEY:
1313 ret = process_dir_item(root, eb, i, &key, active_node);
1315 case BTRFS_INODE_REF_KEY:
1316 ret = process_inode_ref(eb, i, &key, active_node);
1318 case BTRFS_INODE_EXTREF_KEY:
1319 ret = process_inode_extref(eb, i, &key, active_node);
1321 case BTRFS_INODE_ITEM_KEY:
1322 ret = process_inode_item(eb, i, &key, active_node);
1324 case BTRFS_EXTENT_DATA_KEY:
1325 ret = process_file_extent(root, eb, i, &key,
1335 static void reada_walk_down(struct btrfs_root *root,
1336 struct extent_buffer *node, int slot)
1345 level = btrfs_header_level(node);
1349 nritems = btrfs_header_nritems(node);
1350 blocksize = btrfs_level_size(root, level - 1);
1351 for (i = slot; i < nritems; i++) {
1352 bytenr = btrfs_node_blockptr(node, i);
1353 ptr_gen = btrfs_node_ptr_generation(node, i);
1354 readahead_tree_block(root, bytenr, blocksize, ptr_gen);
1359 * Check the child node/leaf by the following conditions:
1360 * 1. the first item key of the node/leaf should be the same as the one in the parent
1362 * 2. the block pointer in the parent node should match the child node/leaf
1363 * 3. the generation in the parent node and the child's header should be consistent
1365 * Otherwise the child node/leaf pointed to by the key in the parent is not valid.
1367 * We would like to check the leaf owner too, but since subvolumes may share
1368 * leaves, that check is not so strong; the key check should be
1369 * sufficient for that case.
1371 static int check_child_node(struct btrfs_root *root,
1372 struct extent_buffer *parent, int slot,
1373 struct extent_buffer *child)
1375 struct btrfs_key parent_key;
1376 struct btrfs_key child_key;
1379 btrfs_node_key_to_cpu(parent, &parent_key, slot);
1380 if (btrfs_header_level(child) == 0)
1381 btrfs_item_key_to_cpu(child, &child_key, 0);
1383 btrfs_node_key_to_cpu(child, &child_key, 0);
1385 if (memcmp(&parent_key, &child_key, sizeof(parent_key))) {
1388 "Wrong key of child node/leaf, wanted: (%llu, %u, %llu), have: (%llu, %u, %llu)\n",
1389 parent_key.objectid, parent_key.type, parent_key.offset,
1390 child_key.objectid, child_key.type, child_key.offset);
1392 if (btrfs_header_bytenr(child) != btrfs_node_blockptr(parent, slot)) {
1394 fprintf(stderr, "Wrong block of child node/leaf, wanted: %llu, have: %llu\n",
1395 btrfs_node_blockptr(parent, slot),
1396 btrfs_header_bytenr(child));
1398 if (btrfs_node_ptr_generation(parent, slot) !=
1399 btrfs_header_generation(child)) {
1401 fprintf(stderr, "Wrong generation of child node/leaf, wanted: %llu, have: %llu\n",
1402 btrfs_header_generation(child),
1403 btrfs_node_ptr_generation(parent, slot));
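/*
 * Walk down the tree from the current path position, validating each child
 * block against its parent via check_child_node() and handing every leaf to
 * process_one_leaf(); shared subtrees are tracked through enter_shared_node()
 * so they are only descended once.
 */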
1408 static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
1409 struct walk_control *wc, int *level)
1411 enum btrfs_tree_block_status status;
1414 struct extent_buffer *next;
1415 struct extent_buffer *cur;
1420 WARN_ON(*level < 0);
1421 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1422 ret = btrfs_lookup_extent_info(NULL, root,
1423 path->nodes[*level]->start,
1424 *level, 1, &refs, NULL);
1431 ret = enter_shared_node(root, path->nodes[*level]->start,
1439 while (*level >= 0) {
1440 WARN_ON(*level < 0);
1441 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1442 cur = path->nodes[*level];
1444 if (btrfs_header_level(cur) != *level)
1447 if (path->slots[*level] >= btrfs_header_nritems(cur))
1450 ret = process_one_leaf(root, cur, wc);
1455 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1456 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
1457 blocksize = btrfs_level_size(root, *level - 1);
1458 ret = btrfs_lookup_extent_info(NULL, root, bytenr, *level - 1,
1464 ret = enter_shared_node(root, bytenr, refs,
1467 path->slots[*level]++;
1472 next = btrfs_find_tree_block(root, bytenr, blocksize);
1473 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
1474 free_extent_buffer(next);
1475 reada_walk_down(root, cur, path->slots[*level]);
1476 next = read_tree_block(root, bytenr, blocksize,
1479 struct btrfs_key node_key;
1481 btrfs_node_key_to_cpu(path->nodes[*level],
1483 path->slots[*level]);
1484 btrfs_add_corrupt_extent_record(root->fs_info,
1486 path->nodes[*level]->start,
1487 root->leafsize, *level);
1493 ret = check_child_node(root, cur, path->slots[*level], next);
1499 if (btrfs_is_leaf(next))
1500 status = btrfs_check_leaf(root, NULL, next);
1502 status = btrfs_check_node(root, NULL, next);
1503 if (status != BTRFS_TREE_BLOCK_CLEAN) {
1504 free_extent_buffer(next);
1509 *level = *level - 1;
1510 free_extent_buffer(path->nodes[*level]);
1511 path->nodes[*level] = next;
1512 path->slots[*level] = 0;
1515 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
1519 static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
1520 struct walk_control *wc, int *level)
1523 struct extent_buffer *leaf;
1525 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1526 leaf = path->nodes[i];
1527 if (path->slots[i] + 1 < btrfs_header_nritems(leaf)) {
1532 free_extent_buffer(path->nodes[*level]);
1533 path->nodes[*level] = NULL;
1534 BUG_ON(*level > wc->active_node);
1535 if (*level == wc->active_node)
1536 leave_shared_node(root, wc, *level);
1543 static int check_root_dir(struct inode_record *rec)
1545 struct inode_backref *backref;
1548 if (!rec->found_inode_item || rec->errors)
1550 if (rec->nlink != 1 || rec->found_link != 0)
1552 if (list_empty(&rec->backrefs))
1554 backref = list_entry(rec->backrefs.next, struct inode_backref, list);
1555 if (!backref->found_inode_ref)
1557 if (backref->index != 0 || backref->namelen != 2 ||
1558 memcmp(backref->name, "..", 2))
1560 if (backref->found_dir_index || backref->found_dir_item)
1567 static int repair_inode_isize(struct btrfs_trans_handle *trans,
1568 struct btrfs_root *root, struct btrfs_path *path,
1569 struct inode_record *rec)
1571 struct btrfs_inode_item *ei;
1572 struct btrfs_key key;
1575 key.objectid = rec->ino;
1576 key.type = BTRFS_INODE_ITEM_KEY;
1577 key.offset = (u64)-1;
1579 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1583 if (!path->slots[0]) {
1590 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1591 if (key.objectid != rec->ino) {
1596 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1597 struct btrfs_inode_item);
1598 btrfs_set_inode_size(path->nodes[0], ei, rec->found_size);
1599 btrfs_mark_buffer_dirty(path->nodes[0]);
1600 rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
1601 printf("reset isize for dir %Lu root %Lu\n", rec->ino,
1602 root->root_key.objectid);
1604 btrfs_release_path(path);
1608 static int repair_inode_orphan_item(struct btrfs_trans_handle *trans,
1609 struct btrfs_root *root,
1610 struct btrfs_path *path,
1611 struct inode_record *rec)
1615 ret = btrfs_add_orphan_item(trans, root, path, rec->ino);
1616 btrfs_release_path(path);
1618 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
1622 static int add_missing_dir_index(struct btrfs_root *root,
1623 struct cache_tree *inode_cache,
1624 struct inode_record *rec,
1625 struct inode_backref *backref)
1627 struct btrfs_path *path;
1628 struct btrfs_trans_handle *trans;
1629 struct btrfs_dir_item *dir_item;
1630 struct extent_buffer *leaf;
1631 struct btrfs_key key;
1632 struct btrfs_disk_key disk_key;
1633 struct inode_record *dir_rec;
1634 unsigned long name_ptr;
1635 u32 data_size = sizeof(*dir_item) + backref->namelen;
1638 path = btrfs_alloc_path();
1642 trans = btrfs_start_transaction(root, 1);
1643 if (IS_ERR(trans)) {
1644 btrfs_free_path(path);
1645 return PTR_ERR(trans);
1648 fprintf(stderr, "repairing missing dir index item for inode %llu\n",
1649 (unsigned long long)rec->ino);
1650 key.objectid = backref->dir;
1651 key.type = BTRFS_DIR_INDEX_KEY;
1652 key.offset = backref->index;
1654 ret = btrfs_insert_empty_item(trans, root, path, &key, data_size);
1657 leaf = path->nodes[0];
1658 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
1660 disk_key.objectid = cpu_to_le64(rec->ino);
1661 disk_key.type = BTRFS_INODE_ITEM_KEY;
1662 disk_key.offset = 0;
1664 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
1665 btrfs_set_dir_type(leaf, dir_item, imode_to_type(rec->imode));
1666 btrfs_set_dir_data_len(leaf, dir_item, 0);
1667 btrfs_set_dir_name_len(leaf, dir_item, backref->namelen);
1668 name_ptr = (unsigned long)(dir_item + 1);
1669 write_extent_buffer(leaf, backref->name, name_ptr, backref->namelen);
1670 btrfs_mark_buffer_dirty(leaf);
1671 btrfs_free_path(path);
1672 btrfs_commit_transaction(trans, root);
1674 backref->found_dir_index = 1;
1675 dir_rec = get_inode_rec(inode_cache, backref->dir, 0);
1678 dir_rec->found_size += backref->namelen;
1679 if (dir_rec->found_size == dir_rec->isize &&
1680 (dir_rec->errors & I_ERR_DIR_ISIZE_WRONG))
1681 dir_rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
1682 if (dir_rec->found_size != dir_rec->isize)
1683 dir_rec->errors |= I_ERR_DIR_ISIZE_WRONG;
1688 static int delete_dir_index(struct btrfs_root *root,
1689 struct cache_tree *inode_cache,
1690 struct inode_record *rec,
1691 struct inode_backref *backref)
1693 struct btrfs_trans_handle *trans;
1694 struct btrfs_dir_item *di;
1695 struct btrfs_path *path;
1698 path = btrfs_alloc_path();
1702 trans = btrfs_start_transaction(root, 1);
1703 if (IS_ERR(trans)) {
1704 btrfs_free_path(path);
1705 return PTR_ERR(trans);
1709 fprintf(stderr, "Deleting bad dir index [%llu,%u,%llu] root %llu\n",
1710 (unsigned long long)backref->dir,
1711 BTRFS_DIR_INDEX_KEY, (unsigned long long)backref->index,
1712 (unsigned long long)root->objectid);
1714 di = btrfs_lookup_dir_index(trans, root, path, backref->dir,
1715 backref->name, backref->namelen,
1716 backref->index, -1);
1719 btrfs_free_path(path);
1720 btrfs_commit_transaction(trans, root);
1727 ret = btrfs_del_item(trans, root, path);
1729 ret = btrfs_delete_one_dir_name(trans, root, path, di);
1731 btrfs_free_path(path);
1732 btrfs_commit_transaction(trans, root);
1736 static int create_inode_item(struct btrfs_root *root,
1737 struct inode_record *rec,
1738 struct inode_backref *backref, int root_dir)
1740 struct btrfs_trans_handle *trans;
1741 struct btrfs_inode_item inode_item;
1742 time_t now = time(NULL);
1745 trans = btrfs_start_transaction(root, 1);
1746 if (IS_ERR(trans)) {
1747 ret = PTR_ERR(trans);
1751 fprintf(stderr, "root %llu inode %llu recreating inode item, this may "
1752 "be incomplete, please check permissions and content after "
1753 "the fsck completes.\n", (unsigned long long)root->objectid,
1754 (unsigned long long)rec->ino);
1756 memset(&inode_item, 0, sizeof(inode_item));
1757 btrfs_set_stack_inode_generation(&inode_item, trans->transid);
1759 btrfs_set_stack_inode_nlink(&inode_item, 1);
1761 btrfs_set_stack_inode_nlink(&inode_item, rec->found_link);
1762 btrfs_set_stack_inode_nbytes(&inode_item, rec->found_size);
1763 if (rec->found_dir_item) {
1764 if (rec->found_file_extent)
1765 fprintf(stderr, "root %llu inode %llu has both a dir "
1766 "item and extents, unsure if it is a dir or a "
1767 "regular file so setting it as a directory\n",
1768 (unsigned long long)root->objectid,
1769 (unsigned long long)rec->ino);
1770 btrfs_set_stack_inode_mode(&inode_item, S_IFDIR | 0755);
1771 btrfs_set_stack_inode_size(&inode_item, rec->found_size);
1772 } else if (!rec->found_dir_item) {
1773 btrfs_set_stack_inode_size(&inode_item, rec->extent_end);
1774 btrfs_set_stack_inode_mode(&inode_item, S_IFREG | 0755);
1776 btrfs_set_stack_timespec_sec(&inode_item.atime, now);
1777 btrfs_set_stack_timespec_nsec(&inode_item.atime, 0);
1778 btrfs_set_stack_timespec_sec(&inode_item.ctime, now);
1779 btrfs_set_stack_timespec_nsec(&inode_item.ctime, 0);
1780 btrfs_set_stack_timespec_sec(&inode_item.mtime, now);
1781 btrfs_set_stack_timespec_nsec(&inode_item.mtime, 0);
1782 btrfs_set_stack_timespec_sec(&inode_item.otime, 0);
1783 btrfs_set_stack_timespec_nsec(&inode_item.otime, 0);
1785 ret = btrfs_insert_inode(trans, root, rec->ino, &inode_item);
1787 btrfs_commit_transaction(trans, root);
1791 static int repair_inode_backrefs(struct btrfs_root *root,
1792 struct inode_record *rec,
1793 struct cache_tree *inode_cache,
1796 struct inode_backref *tmp, *backref;
1797 u64 root_dirid = btrfs_root_dirid(&root->root_item);
1801 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
1802 if (!delete && rec->ino == root_dirid) {
1803 if (!rec->found_inode_item) {
1804 ret = create_inode_item(root, rec, backref, 1);
1811 /* Index 0 for root dirs is special, don't mess with it */
1812 if (rec->ino == root_dirid && backref->index == 0)
1816 ((backref->found_dir_index && !backref->found_inode_ref) ||
1817 (backref->found_dir_index && backref->found_inode_ref &&
1818 (backref->errors & REF_ERR_INDEX_UNMATCH)))) {
1819 ret = delete_dir_index(root, inode_cache, rec, backref);
1823 list_del(&backref->list);
1827 if (!delete && !backref->found_dir_index &&
1828 backref->found_dir_item && backref->found_inode_ref) {
1829 ret = add_missing_dir_index(root, inode_cache, rec,
1834 if (backref->found_dir_item &&
1835 backref->found_dir_index) {
1837 if (!backref->errors &&
1838 backref->found_inode_ref) {
1839 list_del(&backref->list);
1845 if (!delete && (!backref->found_dir_index &&
1846 !backref->found_dir_item &&
1847 backref->found_inode_ref)) {
1848 struct btrfs_trans_handle *trans;
1849 struct btrfs_key location;
1851 ret = check_dir_conflict(root, backref->name,
1857 * let the nlink fixing routine handle it,
1858 * which can do it better.
1863 location.objectid = rec->ino;
1864 location.type = BTRFS_INODE_ITEM_KEY;
1865 location.offset = 0;
1867 trans = btrfs_start_transaction(root, 1);
1868 if (IS_ERR(trans)) {
1869 ret = PTR_ERR(trans);
1872 fprintf(stderr, "adding missing dir index/item pair "
"for inode %llu\n",
1874 (unsigned long long)rec->ino);
1875 ret = btrfs_insert_dir_item(trans, root, backref->name,
1877 backref->dir, &location,
1878 imode_to_type(rec->imode),
1881 btrfs_commit_transaction(trans, root);
1885 if (!delete && (backref->found_inode_ref &&
1886 backref->found_dir_index &&
1887 backref->found_dir_item &&
1888 !(backref->errors & REF_ERR_INDEX_UNMATCH) &&
1889 !rec->found_inode_item)) {
1890 ret = create_inode_item(root, rec, backref, 0);
1897 return ret ? ret : repaired;
1901 * To determine the file type for nlink/inode_item repair
1903 * Return 0 if file type is found and BTRFS_FT_* is stored into type.
1904 * Return -ENOENT if file type is not found.
1906 static int find_file_type(struct inode_record *rec, u8 *type)
1908 struct inode_backref *backref;
1910 /* For inode item recovered case */
1911 if (rec->found_inode_item) {
1912 *type = imode_to_type(rec->imode);
1916 list_for_each_entry(backref, &rec->backrefs, list) {
1917 if (backref->found_dir_index || backref->found_dir_item) {
1918 *type = backref->filetype;
1926 * To determine the file name for nlink repair
1928 * Return 0 if file name is found, set name and namelen.
1929 * Return -ENOENT if file name is not found.
1931 static int find_file_name(struct inode_record *rec,
1932 char *name, int *namelen)
1934 struct inode_backref *backref;
1936 list_for_each_entry(backref, &rec->backrefs, list) {
1937 if (backref->found_dir_index || backref->found_dir_item ||
1938 backref->found_inode_ref) {
1939 memcpy(name, backref->name, backref->namelen);
1940 *namelen = backref->namelen;
1947 /* Reset the nlink of the inode to the correct one */
1948 static int reset_nlink(struct btrfs_trans_handle *trans,
1949 struct btrfs_root *root,
1950 struct btrfs_path *path,
1951 struct inode_record *rec)
1953 struct inode_backref *backref;
1954 struct inode_backref *tmp;
1955 struct btrfs_key key;
1956 struct btrfs_inode_item *inode_item;
1959 /* We don't trust this count either; reset it and rebuild it from the backrefs */
1960 rec->found_link = 0;
1962 /* Remove all backref including the valid ones */
1963 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
1964 ret = btrfs_unlink(trans, root, rec->ino, backref->dir,
1965 backref->index, backref->name,
1966 backref->namelen, 0);
1970 /* remove invalid backref, so it won't be added back */
1971 if (!(backref->found_dir_index &&
1972 backref->found_dir_item &&
1973 backref->found_inode_ref)) {
1974 list_del(&backref->list);
1981 /* Set nlink to 0 */
1982 key.objectid = rec->ino;
1983 key.type = BTRFS_INODE_ITEM_KEY;
1985 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1992 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1993 struct btrfs_inode_item);
1994 btrfs_set_inode_nlink(path->nodes[0], inode_item, 0);
1995 btrfs_mark_buffer_dirty(path->nodes[0]);
1996 btrfs_release_path(path);
1999 * Add back the valid inode_ref/dir_item/dir_index items;
2000 * btrfs_add_link() will handle the nlink increment, so the new nlink will be correct
2002 list_for_each_entry(backref, &rec->backrefs, list) {
2003 ret = btrfs_add_link(trans, root, rec->ino, backref->dir,
2004 backref->name, backref->namelen,
2005 backref->ref_type, &backref->index, 1);
2010 btrfs_release_path(path);
2014 static int repair_inode_nlinks(struct btrfs_trans_handle *trans,
2015 struct btrfs_root *root,
2016 struct btrfs_path *path,
2017 struct inode_record *rec)
2019 char *dir_name = "lost+found";
2020 char namebuf[BTRFS_NAME_LEN] = {0};
2025 int name_recovered = 0;
2026 int type_recovered = 0;
2030 * Get the file name and type first, before the invalid inode refs
2031 * are deleted by reset_nlink() below
2033 name_recovered = !find_file_name(rec, namebuf, &namelen);
2034 type_recovered = !find_file_type(rec, &type);
2036 if (!name_recovered) {
2037 printf("Can't get file name for inode %llu, using '%llu' as fallback\n",
2038 rec->ino, rec->ino);
2039 namelen = count_digits(rec->ino);
2040 sprintf(namebuf, "%llu", rec->ino);
2043 if (!type_recovered) {
2044 printf("Can't get file type for inode %llu, using FILE as fallback\n",
2046 type = BTRFS_FT_REG_FILE;
2050 ret = reset_nlink(trans, root, path, rec);
2053 "Failed to reset nlink for inode %llu: %s\n",
2054 rec->ino, strerror(-ret));
2058 if (rec->found_link == 0) {
2059 lost_found_ino = root->highest_inode;
2060 if (lost_found_ino >= BTRFS_LAST_FREE_OBJECTID) {
2065 ret = btrfs_mkdir(trans, root, dir_name, strlen(dir_name),
2066 BTRFS_FIRST_FREE_OBJECTID, &lost_found_ino,
2069 fprintf(stderr, "Failed to create '%s' dir: %s\n",
2070 dir_name, strerror(-ret));
2073 ret = btrfs_add_link(trans, root, rec->ino, lost_found_ino,
2074 namebuf, namelen, type, NULL, 1);
2075 if (ret == -EEXIST) {
2077 * Conflicting file name, add ".INO" as suffix; the +1 below is for the '.'
2079 if (namelen + count_digits(rec->ino) + 1 >
2084 snprintf(namebuf + namelen, BTRFS_NAME_LEN - namelen,
2086 namelen += count_digits(rec->ino) + 1;
2087 ret = btrfs_add_link(trans, root, rec->ino,
2088 lost_found_ino, namebuf,
2089 namelen, type, NULL, 1);
2093 "Failed to link the inode %llu to %s dir: %s",
2094 rec->ino, dir_name, strerror(-ret));
2098 * Just increase the found_link, don't actually add the
2099 * backref. This will make things easier and this inode
2100 * record will be freed after the repair is done.
2101 * So fsck will not report a problem about this inode.
2104 printf("Moving file '%.*s' to '%s' dir since it has no valid backref\n",
2105 namelen, namebuf, dir_name);
2107 rec->errors &= ~I_ERR_LINK_COUNT_WRONG;
2108 printf("Fixed the nlink of inode %llu\n", rec->ino);
2110 btrfs_release_path(path);
2115 * Check if there is any normal (reg or prealloc) file extent for the given inode.
2117 * This is used to determine the file type when neither its dir_index/item nor
2118 * its inode_item exists.
2120 * This will *NOT* report errors; if any error happens, just consider the inode
2121 * to have no normal file extent.
2123 static int find_normal_file_extent(struct btrfs_root *root, u64 ino)
2125 struct btrfs_path *path;
2126 struct btrfs_key key;
2127 struct btrfs_key found_key;
2128 struct btrfs_file_extent_item *fi;
2132 path = btrfs_alloc_path();
2136 key.type = BTRFS_EXTENT_DATA_KEY;
2139 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2144 if (ret && path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2145 ret = btrfs_next_leaf(root, path);
2152 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2154 if (found_key.objectid != ino ||
2155 found_key.type != BTRFS_EXTENT_DATA_KEY)
2157 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
2158 struct btrfs_file_extent_item);
2159 type = btrfs_file_extent_type(path->nodes[0], fi);
2160 if (type != BTRFS_FILE_EXTENT_INLINE) {
2166 btrfs_free_path(path);
2170 static u32 btrfs_type_to_imode(u8 type)
2172 static u32 imode_by_btrfs_type[] = {
2173 [BTRFS_FT_REG_FILE] = S_IFREG,
2174 [BTRFS_FT_DIR] = S_IFDIR,
2175 [BTRFS_FT_CHRDEV] = S_IFCHR,
2176 [BTRFS_FT_BLKDEV] = S_IFBLK,
2177 [BTRFS_FT_FIFO] = S_IFIFO,
2178 [BTRFS_FT_SOCK] = S_IFSOCK,
2179 [BTRFS_FT_SYMLINK] = S_IFLNK,
2182 return imode_by_btrfs_type[(type)];
2185 static int repair_inode_no_item(struct btrfs_trans_handle *trans,
2186 struct btrfs_root *root,
2187 struct btrfs_path *path,
2188 struct inode_record *rec)
2192 int type_recovered = 0;
* TODO:
2197 * 1. salvage data from the existing file extents and
2198 *    punch holes to keep the file extents consistent.
2199 * 2. salvage data from the extent tree
2201 printf("Trying to rebuild inode:%llu\n", rec->ino);
2203 type_recovered = !find_file_type(rec, &filetype);
2206 * Try to determine the inode type if it was not found.
2208 * If a regular file extent was found, it must be FILE.
2209 * If a dir_item/index was found, it must be DIR.
2211 * For an undetermined one, use FILE as the fallback.
* TODO:
2214 * 1. If an extent belonging to it is found in the extent tree, it must be FILE.
2215 *    Needs an extra hook in the extent tree scan.
2216 * 2. If a backref (inode_index/item is already handled) to it is found,
*    it must be DIR.
2218 *    Needs a new inode-inode ref structure to allow searching for that.
2220 if (!type_recovered) {
2221 if (rec->found_file_extent &&
2222 find_normal_file_extent(root, rec->ino)) {
2224 filetype = BTRFS_FT_REG_FILE;
2225 } else if (rec->found_dir_item) {
2227 filetype = BTRFS_FT_DIR;
2229 printf("Can't determint the filetype for inode %llu, assume it is a normal file\n",
2232 filetype = BTRFS_FT_REG_FILE;
2236 ret = btrfs_new_inode(trans, root, rec->ino,
2237 mode | btrfs_type_to_imode(filetype));
2242 * The inode rebuild is done here; we only rebuild the inode item and
2243 * don't repair the nlink (e.g. move the file to lost+found).
2244 * That is the job of the nlink repair.
2246 * We just fill the record and return.
2248 rec->found_dir_item = 1;
2249 rec->imode = mode | btrfs_type_to_imode(filetype);
2251 rec->errors &= ~I_ERR_NO_INODE_ITEM;
2252 /* Ensure the inode_nlinks repair function will be called */
2253 rec->errors |= I_ERR_LINK_COUNT_WRONG;
2258 static int try_repair_inode(struct btrfs_root *root, struct inode_record *rec)
2260 struct btrfs_trans_handle *trans;
2261 struct btrfs_path *path;
2264 if (!(rec->errors & (I_ERR_DIR_ISIZE_WRONG |
2265 I_ERR_NO_ORPHAN_ITEM |
2266 I_ERR_LINK_COUNT_WRONG |
2267 I_ERR_NO_INODE_ITEM)))
2270 path = btrfs_alloc_path();
2275 * For nlink repair, it may create a dir and add a link, so reserve:
2276 * 2 for the parent (256)'s dir_index and dir_item
2277 * 2 for lost+found dir's inode_item and inode_ref
2278 * 1 for the new inode_ref of the file
2279 * 2 for lost+found dir's dir_index and dir_item for the file
2281 trans = btrfs_start_transaction(root, 7);
2282 if (IS_ERR(trans)) {
2283 btrfs_free_path(path);
2284 return PTR_ERR(trans);
2287 if (rec->errors & I_ERR_NO_INODE_ITEM)
2288 ret = repair_inode_no_item(trans, root, path, rec);
2289 if (!ret && rec->errors & I_ERR_DIR_ISIZE_WRONG)
2290 ret = repair_inode_isize(trans, root, path, rec);
2291 if (!ret && rec->errors & I_ERR_NO_ORPHAN_ITEM)
2292 ret = repair_inode_orphan_item(trans, root, path, rec);
2293 if (!ret && rec->errors & I_ERR_LINK_COUNT_WRONG)
2294 ret = repair_inode_nlinks(trans, root, path, rec);
2295 btrfs_commit_transaction(trans, root);
2296 btrfs_free_path(path);
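/*
 * Final pass over all inode records collected for one root: repair backrefs
 * and inode errors when repair mode is enabled, verify the root directory,
 * and print any remaining per-inode and per-backref errors.
 */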
2300 static int check_inode_recs(struct btrfs_root *root,
2301 struct cache_tree *inode_cache)
2303 struct cache_extent *cache;
2304 struct ptr_node *node;
2305 struct inode_record *rec;
2306 struct inode_backref *backref;
2311 u64 root_dirid = btrfs_root_dirid(&root->root_item);
2313 if (btrfs_root_refs(&root->root_item) == 0) {
2314 if (!cache_tree_empty(inode_cache))
2315 fprintf(stderr, "warning line %d\n", __LINE__);
2320 * We need to record the highest inode number for later 'lost+found'
* dir creation.
2322 * We must select an ino not used/referred to by any existing inode, or
2323 * the 'lost+found' ino may be a missing ino in a corrupted leaf,
2324 * which may cause the 'lost+found' dir to have wrong nlinks.
2326 cache = last_cache_extent(inode_cache);
2328 node = container_of(cache, struct ptr_node, cache);
2330 if (rec->ino > root->highest_inode)
2331 root->highest_inode = rec->ino;
2335 * We need to repair backrefs first because we could change some of the
2336 * errors in the inode recs.
2338 * We also need to go through and delete invalid backrefs first and then
2339 * add the correct ones second. We do this because we may get EEXIST
2340 * when adding back the correct index because we hadn't yet deleted the
* invalid one.
2343 * For example, if we were missing a dir index then the directory's
2344 * isize would be wrong, so if we fixed the isize to what we thought it
2345 * would be and then fixed the backref we'd still have an invalid fs, so
2346 * we need to add back the dir index and then check to see if the isize
* is still wrong.
2351 if (stage == 3 && !err)
2354 cache = search_cache_extent(inode_cache, 0);
2355 while (repair && cache) {
2356 node = container_of(cache, struct ptr_node, cache);
2358 cache = next_cache_extent(cache);
2360 /* Need to free everything up and rescan */
2362 remove_cache_extent(inode_cache, &node->cache);
2364 free_inode_rec(rec);
2368 if (list_empty(&rec->backrefs))
2371 ret = repair_inode_backrefs(root, rec, inode_cache,
2385 rec = get_inode_rec(inode_cache, root_dirid, 0);
2387 ret = check_root_dir(rec);
2389 fprintf(stderr, "root %llu root dir %llu error\n",
2390 (unsigned long long)root->root_key.objectid,
2391 (unsigned long long)root_dirid);
2392 print_inode_error(root, rec);
2397 struct btrfs_trans_handle *trans;
2399 trans = btrfs_start_transaction(root, 1);
2400 if (IS_ERR(trans)) {
2401 err = PTR_ERR(trans);
2406 "root %llu missing its root dir, recreating\n",
2407 (unsigned long long)root->objectid);
2409 ret = btrfs_make_root_dir(trans, root, root_dirid);
2412 btrfs_commit_transaction(trans, root);
2416 fprintf(stderr, "root %llu root dir %llu not found\n",
2417 (unsigned long long)root->root_key.objectid,
2418 (unsigned long long)root_dirid);
2422 cache = search_cache_extent(inode_cache, 0);
2425 node = container_of(cache, struct ptr_node, cache);
2427 remove_cache_extent(inode_cache, &node->cache);
2429 if (rec->ino == root_dirid ||
2430 rec->ino == BTRFS_ORPHAN_OBJECTID) {
2431 free_inode_rec(rec);
2435 if (rec->errors & I_ERR_NO_ORPHAN_ITEM) {
2436 ret = check_orphan_item(root, rec->ino);
2438 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
2439 if (can_free_inode_rec(rec)) {
2440 free_inode_rec(rec);
2445 if (!rec->found_inode_item)
2446 rec->errors |= I_ERR_NO_INODE_ITEM;
2447 if (rec->found_link != rec->nlink)
2448 rec->errors |= I_ERR_LINK_COUNT_WRONG;
2450 ret = try_repair_inode(root, rec);
2451 if (ret == 0 && can_free_inode_rec(rec)) {
2452 free_inode_rec(rec);
2458 if (!(repair && ret == 0))
2460 print_inode_error(root, rec);
2461 list_for_each_entry(backref, &rec->backrefs, list) {
2462 if (!backref->found_dir_item)
2463 backref->errors |= REF_ERR_NO_DIR_ITEM;
2464 if (!backref->found_dir_index)
2465 backref->errors |= REF_ERR_NO_DIR_INDEX;
2466 if (!backref->found_inode_ref)
2467 backref->errors |= REF_ERR_NO_INODE_REF;
2468 fprintf(stderr, "\tunresolved ref dir %llu index %llu"
2469 " namelen %u name %s filetype %d errors %x",
2470 (unsigned long long)backref->dir,
2471 (unsigned long long)backref->index,
2472 backref->namelen, backref->name,
2473 backref->filetype, backref->errors);
2474 print_ref_error(backref->errors);
2476 free_inode_rec(rec);
2478 return (error > 0) ? -1 : 0;
2481 static struct root_record *get_root_rec(struct cache_tree *root_cache,
2484 struct cache_extent *cache;
2485 struct root_record *rec = NULL;
2488 cache = lookup_cache_extent(root_cache, objectid, 1);
2490 rec = container_of(cache, struct root_record, cache);
2492 rec = calloc(1, sizeof(*rec));
2493 rec->objectid = objectid;
2494 INIT_LIST_HEAD(&rec->backrefs);
2495 rec->cache.start = objectid;
2496 rec->cache.size = 1;
2498 ret = insert_cache_extent(root_cache, &rec->cache);
2504 static struct root_backref *get_root_backref(struct root_record *rec,
2505 u64 ref_root, u64 dir, u64 index,
2506 const char *name, int namelen)
2508 struct root_backref *backref;
2510 list_for_each_entry(backref, &rec->backrefs, list) {
2511 if (backref->ref_root != ref_root || backref->dir != dir ||
2512 backref->namelen != namelen)
2514 if (memcmp(name, backref->name, namelen))
2519 backref = malloc(sizeof(*backref) + namelen + 1);
2520 memset(backref, 0, sizeof(*backref));
2521 backref->ref_root = ref_root;
2523 backref->index = index;
2524 backref->namelen = namelen;
2525 memcpy(backref->name, name, namelen);
2526 backref->name[namelen] = '\0';
2527 list_add_tail(&backref->list, &rec->backrefs);
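	/*
	 * The backref name is stored inline right after the struct, hence the
	 * malloc of sizeof(*backref) + namelen + 1 above.
	 */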
2531 static void free_root_record(struct cache_extent *cache)
2533 struct root_record *rec;
2534 struct root_backref *backref;
2536 rec = container_of(cache, struct root_record, cache);
2537 while (!list_empty(&rec->backrefs)) {
2538 backref = list_entry(rec->backrefs.next,
2539 struct root_backref, list);
2540 list_del(&backref->list);
2547 FREE_EXTENT_CACHE_BASED_TREE(root_recs, free_root_record);
2549 static int add_root_backref(struct cache_tree *root_cache,
2550 u64 root_id, u64 ref_root, u64 dir, u64 index,
2551 const char *name, int namelen,
2552 int item_type, int errors)
2554 struct root_record *rec;
2555 struct root_backref *backref;
2557 rec = get_root_rec(root_cache, root_id);
2558 backref = get_root_backref(rec, ref_root, dir, index, name, namelen);
2560 backref->errors |= errors;
2562 if (item_type != BTRFS_DIR_ITEM_KEY) {
2563 if (backref->found_dir_index || backref->found_back_ref ||
2564 backref->found_forward_ref) {
2565 if (backref->index != index)
2566 backref->errors |= REF_ERR_INDEX_UNMATCH;
2568 backref->index = index;
2572 if (item_type == BTRFS_DIR_ITEM_KEY) {
2573 if (backref->found_forward_ref)
2575 backref->found_dir_item = 1;
2576 } else if (item_type == BTRFS_DIR_INDEX_KEY) {
2577 backref->found_dir_index = 1;
2578 } else if (item_type == BTRFS_ROOT_REF_KEY) {
2579 if (backref->found_forward_ref)
2580 backref->errors |= REF_ERR_DUP_ROOT_REF;
2581 else if (backref->found_dir_item)
2583 backref->found_forward_ref = 1;
2584 } else if (item_type == BTRFS_ROOT_BACKREF_KEY) {
2585 if (backref->found_back_ref)
2586 backref->errors |= REF_ERR_DUP_ROOT_BACKREF;
2587 backref->found_back_ref = 1;
2592 if (backref->found_forward_ref && backref->found_dir_item)
2593 backref->reachable = 1;
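	/*
	 * A root backref only counts as reachable once both the forward
	 * ROOT_REF item and the dir item naming the subvolume have been seen.
	 */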
2597 static int merge_root_recs(struct btrfs_root *root,
2598 struct cache_tree *src_cache,
2599 struct cache_tree *dst_cache)
2601 struct cache_extent *cache;
2602 struct ptr_node *node;
2603 struct inode_record *rec;
2604 struct inode_backref *backref;
2607 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2608 free_inode_recs_tree(src_cache);
2613 cache = search_cache_extent(src_cache, 0);
2616 node = container_of(cache, struct ptr_node, cache);
2618 remove_cache_extent(src_cache, &node->cache);
2621 ret = is_child_root(root, root->objectid, rec->ino);
2627 list_for_each_entry(backref, &rec->backrefs, list) {
2628 BUG_ON(backref->found_inode_ref);
2629 if (backref->found_dir_item)
2630 add_root_backref(dst_cache, rec->ino,
2631 root->root_key.objectid, backref->dir,
2632 backref->index, backref->name,
2633 backref->namelen, BTRFS_DIR_ITEM_KEY,
2635 if (backref->found_dir_index)
2636 add_root_backref(dst_cache, rec->ino,
2637 root->root_key.objectid, backref->dir,
2638 backref->index, backref->name,
2639 backref->namelen, BTRFS_DIR_INDEX_KEY,
2643 free_inode_rec(rec);
2650 static int check_root_refs(struct btrfs_root *root,
2651 struct cache_tree *root_cache)
2653 struct root_record *rec;
2654 struct root_record *ref_root;
2655 struct root_backref *backref;
2656 struct cache_extent *cache;
2662 rec = get_root_rec(root_cache, BTRFS_FS_TREE_OBJECTID);
2665	/* fixme: this cannot detect circular references */
2668 cache = search_cache_extent(root_cache, 0);
2672 rec = container_of(cache, struct root_record, cache);
2673 cache = next_cache_extent(cache);
2675 if (rec->found_ref == 0)
2678 list_for_each_entry(backref, &rec->backrefs, list) {
2679 if (!backref->reachable)
2682 ref_root = get_root_rec(root_cache,
2684 if (ref_root->found_ref > 0)
2687 backref->reachable = 0;
2689 if (rec->found_ref == 0)
2695 cache = search_cache_extent(root_cache, 0);
2699 rec = container_of(cache, struct root_record, cache);
2700 cache = next_cache_extent(cache);
2702 if (rec->found_ref == 0 &&
2703 rec->objectid >= BTRFS_FIRST_FREE_OBJECTID &&
2704 rec->objectid <= BTRFS_LAST_FREE_OBJECTID) {
2705 ret = check_orphan_item(root->fs_info->tree_root,
2711 * If we don't have a root item then we likely just have
2712 * a dir item in a snapshot for this root but no actual
2713		 * ref key or anything, so it's meaningless.
2715 if (!rec->found_root_item)
2718 fprintf(stderr, "fs tree %llu not referenced\n",
2719 (unsigned long long)rec->objectid);
2723 if (rec->found_ref > 0 && !rec->found_root_item)
2725 list_for_each_entry(backref, &rec->backrefs, list) {
2726 if (!backref->found_dir_item)
2727 backref->errors |= REF_ERR_NO_DIR_ITEM;
2728 if (!backref->found_dir_index)
2729 backref->errors |= REF_ERR_NO_DIR_INDEX;
2730 if (!backref->found_back_ref)
2731 backref->errors |= REF_ERR_NO_ROOT_BACKREF;
2732 if (!backref->found_forward_ref)
2733 backref->errors |= REF_ERR_NO_ROOT_REF;
2734 if (backref->reachable && backref->errors)
2741 fprintf(stderr, "fs tree %llu refs %u %s\n",
2742 (unsigned long long)rec->objectid, rec->found_ref,
2743 rec->found_root_item ? "" : "not found");
2745 list_for_each_entry(backref, &rec->backrefs, list) {
2746 if (!backref->reachable)
2748 if (!backref->errors && rec->found_root_item)
2750 fprintf(stderr, "\tunresolved ref root %llu dir %llu"
2751 " index %llu namelen %u name %s errors %x\n",
2752 (unsigned long long)backref->ref_root,
2753 (unsigned long long)backref->dir,
2754 (unsigned long long)backref->index,
2755 backref->namelen, backref->name,
2757 print_ref_error(backref->errors);
2760 return errors > 0 ? 1 : 0;
2763 static int process_root_ref(struct extent_buffer *eb, int slot,
2764 struct btrfs_key *key,
2765 struct cache_tree *root_cache)
2771 struct btrfs_root_ref *ref;
2772 char namebuf[BTRFS_NAME_LEN];
2775 ref = btrfs_item_ptr(eb, slot, struct btrfs_root_ref);
2777 dirid = btrfs_root_ref_dirid(eb, ref);
2778 index = btrfs_root_ref_sequence(eb, ref);
2779 name_len = btrfs_root_ref_name_len(eb, ref);
2781 if (name_len <= BTRFS_NAME_LEN) {
2785 len = BTRFS_NAME_LEN;
2786 error = REF_ERR_NAME_TOO_LONG;
2788 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
2790 if (key->type == BTRFS_ROOT_REF_KEY) {
2791 add_root_backref(root_cache, key->offset, key->objectid, dirid,
2792 index, namebuf, len, key->type, error);
2794 add_root_backref(root_cache, key->objectid, key->offset, dirid,
2795 index, namebuf, len, key->type, error);
2800 static void free_corrupt_block(struct cache_extent *cache)
2802 struct btrfs_corrupt_block *corrupt;
2804 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
2808 FREE_EXTENT_CACHE_BASED_TREE(corrupt_blocks, free_corrupt_block);
2811 * Repair the btree of the given root.
2813	 * The fix is to remove the node keys recorded in the corrupt_blocks
2814	 * cache_tree and rebalance the tree.
2815 * After the fix, the btree should be writeable.
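	 * Note: only blocks recorded in the corrupt_blocks cache are touched;
	 * their node pointers are deleted first, then a second
	 * btrfs_search_slot() pass with ins_len == -1 rebalances the affected
	 * paths.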
2817 static int repair_btree(struct btrfs_root *root,
2818 struct cache_tree *corrupt_blocks)
2820 struct btrfs_trans_handle *trans;
2821 struct btrfs_path *path;
2822 struct btrfs_corrupt_block *corrupt;
2823 struct cache_extent *cache;
2824 struct btrfs_key key;
2829 if (cache_tree_empty(corrupt_blocks))
2832 path = btrfs_alloc_path();
2836 trans = btrfs_start_transaction(root, 1);
2837 if (IS_ERR(trans)) {
2838 ret = PTR_ERR(trans);
2839 fprintf(stderr, "Error starting transaction: %s\n",
2843 cache = first_cache_extent(corrupt_blocks);
2845 corrupt = container_of(cache, struct btrfs_corrupt_block,
2847 level = corrupt->level;
2848 path->lowest_level = level;
2849 key.objectid = corrupt->key.objectid;
2850 key.type = corrupt->key.type;
2851 key.offset = corrupt->key.offset;
2854		 * Here we don't want any tree balancing, since it could
2855		 * trigger a balance against a corrupted sibling leaf/node,
2856		 * so ins_len is set to 0 here.
2857		 * Balancing is done after all corrupt nodes/leaves are deleted.
2859 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2862 offset = btrfs_node_blockptr(path->nodes[level],
2863 path->slots[level]);
2865 /* Remove the ptr */
2866 ret = btrfs_del_ptr(trans, root, path, level,
2867 path->slots[level]);
2871		 * Remove the corresponding extent;
2872		 * the return value is ignored.
2874 btrfs_release_path(path);
2875 ret = btrfs_free_extent(trans, root, offset, root->nodesize,
2876 0, root->root_key.objectid,
2878 cache = next_cache_extent(cache);
2881 /* Balance the btree using btrfs_search_slot() */
2882 cache = first_cache_extent(corrupt_blocks);
2884 corrupt = container_of(cache, struct btrfs_corrupt_block,
2886 memcpy(&key, &corrupt->key, sizeof(key));
2887 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2890		/* the return value will always be > 0 since it won't find the item */
2892 btrfs_release_path(path);
2893 cache = next_cache_extent(cache);
2896 btrfs_commit_transaction(trans, root);
2898 btrfs_free_path(path);
2902 static int check_fs_root(struct btrfs_root *root,
2903 struct cache_tree *root_cache,
2904 struct walk_control *wc)
2910 struct btrfs_path path;
2911 struct shared_node root_node;
2912 struct root_record *rec;
2913 struct btrfs_root_item *root_item = &root->root_item;
2914 struct cache_tree corrupt_blocks;
2915 enum btrfs_tree_block_status status;
2918	 * Reuse the corrupt_blocks cache tree to record corrupted tree blocks.
2920	 * Unlike its usage in the extent tree check, here we do it on a per
2921	 * fs/subvol tree basis.
2923 cache_tree_init(&corrupt_blocks);
2924 root->fs_info->corrupt_blocks = &corrupt_blocks;
2925 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2926 rec = get_root_rec(root_cache, root->root_key.objectid);
2927 if (btrfs_root_refs(root_item) > 0)
2928 rec->found_root_item = 1;
2931 btrfs_init_path(&path);
2932 memset(&root_node, 0, sizeof(root_node));
2933 cache_tree_init(&root_node.root_cache);
2934 cache_tree_init(&root_node.inode_cache);
2936 level = btrfs_header_level(root->node);
2937 memset(wc->nodes, 0, sizeof(wc->nodes));
2938 wc->nodes[level] = &root_node;
2939 wc->active_node = level;
2940 wc->root_level = level;
2942	/* We may not have checked the root block, let's do that now */
2943 if (btrfs_is_leaf(root->node))
2944 status = btrfs_check_leaf(root, NULL, root->node);
2946 status = btrfs_check_node(root, NULL, root->node);
2947 if (status != BTRFS_TREE_BLOCK_CLEAN)
2950 if (btrfs_root_refs(root_item) > 0 ||
2951 btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2952 path.nodes[level] = root->node;
2953 extent_buffer_get(root->node);
2954 path.slots[level] = 0;
2956 struct btrfs_key key;
2957 struct btrfs_disk_key found_key;
2959 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2960 level = root_item->drop_level;
2961 path.lowest_level = level;
2962 wret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
2965 btrfs_node_key(path.nodes[level], &found_key,
2967 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2968 sizeof(found_key)));
2972 wret = walk_down_tree(root, &path, wc, &level);
2978 wret = walk_up_tree(root, &path, wc, &level);
2985 btrfs_release_path(&path);
2987 if (!cache_tree_empty(&corrupt_blocks)) {
2988 struct cache_extent *cache;
2989 struct btrfs_corrupt_block *corrupt;
2991 printf("The following tree block(s) is corrupted in tree %llu:\n",
2992 root->root_key.objectid);
2993 cache = first_cache_extent(&corrupt_blocks);
2995 corrupt = container_of(cache,
2996 struct btrfs_corrupt_block,
2998 printf("\ttree block bytenr: %llu, level: %d, node key: (%llu, %u, %llu)\n",
2999 cache->start, corrupt->level,
3000 corrupt->key.objectid, corrupt->key.type,
3001 corrupt->key.offset);
3002 cache = next_cache_extent(cache);
3005 printf("Try to repair the btree for root %llu\n",
3006 root->root_key.objectid);
3007 ret = repair_btree(root, &corrupt_blocks);
3009 fprintf(stderr, "Failed to repair btree: %s\n",
3012 printf("Btree for root %llu is fixed\n",
3013 root->root_key.objectid);
3017 err = merge_root_recs(root, &root_node.root_cache, root_cache);
3021 if (root_node.current) {
3022 root_node.current->checked = 1;
3023 maybe_free_inode_rec(&root_node.inode_cache,
3027 err = check_inode_recs(root, &root_node.inode_cache);
3031 free_corrupt_blocks_tree(&corrupt_blocks);
3032 root->fs_info->corrupt_blocks = NULL;
3036 static int fs_root_objectid(u64 objectid)
3038 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
3039 objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3041 return is_fstree(objectid);
3044 static int check_fs_roots(struct btrfs_root *root,
3045 struct cache_tree *root_cache)
3047 struct btrfs_path path;
3048 struct btrfs_key key;
3049 struct walk_control wc;
3050 struct extent_buffer *leaf, *tree_node;
3051 struct btrfs_root *tmp_root;
3052 struct btrfs_root *tree_root = root->fs_info->tree_root;
3057 * Just in case we made any changes to the extent tree that weren't
3058 * reflected into the free space cache yet.
3061 reset_cached_block_groups(root->fs_info);
3062 memset(&wc, 0, sizeof(wc));
3063 cache_tree_init(&wc.shared);
3064 btrfs_init_path(&path);
3069 key.type = BTRFS_ROOT_ITEM_KEY;
3070 ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
3075 tree_node = tree_root->node;
3077 if (tree_node != tree_root->node) {
3078 free_root_recs_tree(root_cache);
3079 btrfs_release_path(&path);
3082 leaf = path.nodes[0];
3083 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
3084 ret = btrfs_next_leaf(tree_root, &path);
3090 leaf = path.nodes[0];
3092 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
3093 if (key.type == BTRFS_ROOT_ITEM_KEY &&
3094 fs_root_objectid(key.objectid)) {
3095 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
3096 tmp_root = btrfs_read_fs_root_no_cache(
3097 root->fs_info, &key);
3099 key.offset = (u64)-1;
3100 tmp_root = btrfs_read_fs_root(
3101 root->fs_info, &key);
3103 if (IS_ERR(tmp_root)) {
3107 ret = check_fs_root(tmp_root, root_cache, &wc);
3108 if (ret == -EAGAIN) {
3109 free_root_recs_tree(root_cache);
3110 btrfs_release_path(&path);
3115 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
3116 btrfs_free_fs_root(tmp_root);
3117 } else if (key.type == BTRFS_ROOT_REF_KEY ||
3118 key.type == BTRFS_ROOT_BACKREF_KEY) {
3119 process_root_ref(leaf, path.slots[0], &key,
3126 btrfs_release_path(&path);
3128 free_extent_cache_tree(&wc.shared);
3129 if (!cache_tree_empty(&wc.shared))
3130 fprintf(stderr, "warning line %d\n", __LINE__);
3135 static int all_backpointers_checked(struct extent_record *rec, int print_errs)
3137 struct list_head *cur = rec->backrefs.next;
3138 struct extent_backref *back;
3139 struct tree_backref *tback;
3140 struct data_backref *dback;
3144 while(cur != &rec->backrefs) {
3145 back = list_entry(cur, struct extent_backref, list);
3147 if (!back->found_extent_tree) {
3151 if (back->is_data) {
3152 dback = (struct data_backref *)back;
3153 fprintf(stderr, "Backref %llu %s %llu"
3154 " owner %llu offset %llu num_refs %lu"
3155 " not found in extent tree\n",
3156 (unsigned long long)rec->start,
3157 back->full_backref ?
3159 back->full_backref ?
3160 (unsigned long long)dback->parent:
3161 (unsigned long long)dback->root,
3162 (unsigned long long)dback->owner,
3163 (unsigned long long)dback->offset,
3164 (unsigned long)dback->num_refs);
3166 tback = (struct tree_backref *)back;
3167 fprintf(stderr, "Backref %llu parent %llu"
3168 " root %llu not found in extent tree\n",
3169 (unsigned long long)rec->start,
3170 (unsigned long long)tback->parent,
3171 (unsigned long long)tback->root);
3174 if (!back->is_data && !back->found_ref) {
3178 tback = (struct tree_backref *)back;
3179 fprintf(stderr, "Backref %llu %s %llu not referenced back %p\n",
3180 (unsigned long long)rec->start,
3181 back->full_backref ? "parent" : "root",
3182 back->full_backref ?
3183 (unsigned long long)tback->parent :
3184 (unsigned long long)tback->root, back);
3186 if (back->is_data) {
3187 dback = (struct data_backref *)back;
3188 if (dback->found_ref != dback->num_refs) {
3192 fprintf(stderr, "Incorrect local backref count"
3193 " on %llu %s %llu owner %llu"
3194 " offset %llu found %u wanted %u back %p\n",
3195 (unsigned long long)rec->start,
3196 back->full_backref ?
3198 back->full_backref ?
3199 (unsigned long long)dback->parent:
3200 (unsigned long long)dback->root,
3201 (unsigned long long)dback->owner,
3202 (unsigned long long)dback->offset,
3203 dback->found_ref, dback->num_refs, back);
3205 if (dback->disk_bytenr != rec->start) {
3209 fprintf(stderr, "Backref disk bytenr does not"
3210 " match extent record, bytenr=%llu, "
3211 "ref bytenr=%llu\n",
3212 (unsigned long long)rec->start,
3213 (unsigned long long)dback->disk_bytenr);
3216 if (dback->bytes != rec->nr) {
3220 fprintf(stderr, "Backref bytes do not match "
3221 "extent backref, bytenr=%llu, ref "
3222 "bytes=%llu, backref bytes=%llu\n",
3223 (unsigned long long)rec->start,
3224 (unsigned long long)rec->nr,
3225 (unsigned long long)dback->bytes);
3228 if (!back->is_data) {
3231 dback = (struct data_backref *)back;
3232 found += dback->found_ref;
3235 if (found != rec->refs) {
3239 fprintf(stderr, "Incorrect global backref count "
3240 "on %llu found %llu wanted %llu\n",
3241 (unsigned long long)rec->start,
3242 (unsigned long long)found,
3243 (unsigned long long)rec->refs);
3249 static int free_all_extent_backrefs(struct extent_record *rec)
3251 struct extent_backref *back;
3252 struct list_head *cur;
3253 while (!list_empty(&rec->backrefs)) {
3254 cur = rec->backrefs.next;
3255 back = list_entry(cur, struct extent_backref, list);
3262 static void free_extent_record_cache(struct btrfs_fs_info *fs_info,
3263 struct cache_tree *extent_cache)
3265 struct cache_extent *cache;
3266 struct extent_record *rec;
3269 cache = first_cache_extent(extent_cache);
3272 rec = container_of(cache, struct extent_record, cache);
3273 btrfs_unpin_extent(fs_info, rec->start, rec->max_size);
3274 remove_cache_extent(extent_cache, cache);
3275 free_all_extent_backrefs(rec);
3280 static int maybe_free_extent_rec(struct cache_tree *extent_cache,
3281 struct extent_record *rec)
3283 if (rec->content_checked && rec->owner_ref_checked &&
3284 rec->extent_item_refs == rec->refs && rec->refs > 0 &&
3285 rec->num_duplicates == 0 && !all_backpointers_checked(rec, 0)) {
3286 remove_cache_extent(extent_cache, &rec->cache);
3287 free_all_extent_backrefs(rec);
3288 list_del_init(&rec->list);
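		/*
		 * An extent record may only be dropped once its content and
		 * owner ref have been checked, its ref counts agree, it has no
		 * duplicates and every backref has been accounted for (see the
		 * condition above).
		 */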
3294 static int check_owner_ref(struct btrfs_root *root,
3295 struct extent_record *rec,
3296 struct extent_buffer *buf)
3298 struct extent_backref *node;
3299 struct tree_backref *back;
3300 struct btrfs_root *ref_root;
3301 struct btrfs_key key;
3302 struct btrfs_path path;
3303 struct extent_buffer *parent;
3308 list_for_each_entry(node, &rec->backrefs, list) {
3311 if (!node->found_ref)
3313 if (node->full_backref)
3315 back = (struct tree_backref *)node;
3316 if (btrfs_header_owner(buf) == back->root)
3319 BUG_ON(rec->is_root);
3321	/* try to find the block by searching the corresponding fs tree */
3322 key.objectid = btrfs_header_owner(buf);
3323 key.type = BTRFS_ROOT_ITEM_KEY;
3324 key.offset = (u64)-1;
3326 ref_root = btrfs_read_fs_root(root->fs_info, &key);
3327 if (IS_ERR(ref_root))
3330 level = btrfs_header_level(buf);
3332 btrfs_item_key_to_cpu(buf, &key, 0);
3334 btrfs_node_key_to_cpu(buf, &key, 0);
3336 btrfs_init_path(&path);
3337 path.lowest_level = level + 1;
3338 ret = btrfs_search_slot(NULL, ref_root, &key, &path, 0, 0);
3342 parent = path.nodes[level + 1];
3343 if (parent && buf->start == btrfs_node_blockptr(parent,
3344 path.slots[level + 1]))
3347 btrfs_release_path(&path);
3348 return found ? 0 : 1;
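/*
 * Return 1 if this extent record carries a non-full tree backref rooted at
 * the extent tree itself; used by record_bad_block_io() below.
 */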
3351 static int is_extent_tree_record(struct extent_record *rec)
3353 struct list_head *cur = rec->backrefs.next;
3354 struct extent_backref *node;
3355 struct tree_backref *back;
3358 while(cur != &rec->backrefs) {
3359 node = list_entry(cur, struct extent_backref, list);
3363 back = (struct tree_backref *)node;
3364 if (node->full_backref)
3366 if (back->root == BTRFS_EXTENT_TREE_OBJECTID)
3373 static int record_bad_block_io(struct btrfs_fs_info *info,
3374 struct cache_tree *extent_cache,
3377 struct extent_record *rec;
3378 struct cache_extent *cache;
3379 struct btrfs_key key;
3381 cache = lookup_cache_extent(extent_cache, start, len);
3385 rec = container_of(cache, struct extent_record, cache);
3386 if (!is_extent_tree_record(rec))
3389 btrfs_disk_key_to_cpu(&key, &rec->parent_key);
3390 return btrfs_add_corrupt_extent_record(info, &key, start, len, 0);
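/*
 * Swap the keys/items in 'slot' and 'slot + 1' of the given block: key
 * pointers for nodes, item headers plus item data for leaves. Used by
 * fix_key_order() below.
 */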
3393 static int swap_values(struct btrfs_root *root, struct btrfs_path *path,
3394 struct extent_buffer *buf, int slot)
3396 if (btrfs_header_level(buf)) {
3397 struct btrfs_key_ptr ptr1, ptr2;
3399 read_extent_buffer(buf, &ptr1, btrfs_node_key_ptr_offset(slot),
3400 sizeof(struct btrfs_key_ptr));
3401 read_extent_buffer(buf, &ptr2,
3402 btrfs_node_key_ptr_offset(slot + 1),
3403 sizeof(struct btrfs_key_ptr));
3404 write_extent_buffer(buf, &ptr1,
3405 btrfs_node_key_ptr_offset(slot + 1),
3406 sizeof(struct btrfs_key_ptr));
3407 write_extent_buffer(buf, &ptr2,
3408 btrfs_node_key_ptr_offset(slot),
3409 sizeof(struct btrfs_key_ptr));
3411 struct btrfs_disk_key key;
3412 btrfs_node_key(buf, &key, 0);
3413 btrfs_fixup_low_keys(root, path, &key,
3414 btrfs_header_level(buf) + 1);
3417 struct btrfs_item *item1, *item2;
3418 struct btrfs_key k1, k2;
3419 char *item1_data, *item2_data;
3420 u32 item1_offset, item2_offset, item1_size, item2_size;
3422 item1 = btrfs_item_nr(slot);
3423 item2 = btrfs_item_nr(slot + 1);
3424 btrfs_item_key_to_cpu(buf, &k1, slot);
3425 btrfs_item_key_to_cpu(buf, &k2, slot + 1);
3426 item1_offset = btrfs_item_offset(buf, item1);
3427 item2_offset = btrfs_item_offset(buf, item2);
3428 item1_size = btrfs_item_size(buf, item1);
3429 item2_size = btrfs_item_size(buf, item2);
3431 item1_data = malloc(item1_size);
3434 item2_data = malloc(item2_size);
3440 read_extent_buffer(buf, item1_data, item1_offset, item1_size);
3441 read_extent_buffer(buf, item2_data, item2_offset, item2_size);
3443 write_extent_buffer(buf, item1_data, item2_offset, item2_size);
3444 write_extent_buffer(buf, item2_data, item1_offset, item1_size);
3448 btrfs_set_item_offset(buf, item1, item2_offset);
3449 btrfs_set_item_offset(buf, item2, item1_offset);
3450 btrfs_set_item_size(buf, item1, item2_size);
3451 btrfs_set_item_size(buf, item2, item1_size);
3453 path->slots[0] = slot;
3454 btrfs_set_item_key_unsafe(root, path, &k2);
3455 path->slots[0] = slot + 1;
3456 btrfs_set_item_key_unsafe(root, path, &k1);
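/*
 * Walk adjacent key pairs in the block at path->lowest_level and swap any
 * pair that compares out of order, marking the buffer dirty whenever a swap
 * was made.
 */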
3461 static int fix_key_order(struct btrfs_trans_handle *trans,
3462 struct btrfs_root *root,
3463 struct btrfs_path *path)
3465 struct extent_buffer *buf;
3466 struct btrfs_key k1, k2;
3468 int level = path->lowest_level;
3471 buf = path->nodes[level];
3472 for (i = 0; i < btrfs_header_nritems(buf) - 1; i++) {
3474 btrfs_node_key_to_cpu(buf, &k1, i);
3475 btrfs_node_key_to_cpu(buf, &k2, i + 1);
3477 btrfs_item_key_to_cpu(buf, &k1, i);
3478 btrfs_item_key_to_cpu(buf, &k2, i + 1);
3480 if (btrfs_comp_cpu_keys(&k1, &k2) < 0)
3482 ret = swap_values(root, path, buf, i);
3485 btrfs_mark_buffer_dirty(buf);
3491 static int delete_bogus_item(struct btrfs_trans_handle *trans,
3492 struct btrfs_root *root,
3493 struct btrfs_path *path,
3494 struct extent_buffer *buf, int slot)
3496 struct btrfs_key key;
3497 int nritems = btrfs_header_nritems(buf);
3499 btrfs_item_key_to_cpu(buf, &key, slot);
3501 /* These are all the keys we can deal with missing. */
3502 if (key.type != BTRFS_DIR_INDEX_KEY &&
3503 key.type != BTRFS_EXTENT_ITEM_KEY &&
3504 key.type != BTRFS_METADATA_ITEM_KEY &&
3505 key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3506 key.type != BTRFS_EXTENT_DATA_REF_KEY)
3509 printf("Deleting bogus item [%llu,%u,%llu] at slot %d on block %llu\n",
3510 (unsigned long long)key.objectid, key.type,
3511 (unsigned long long)key.offset, slot, buf->start);
3512 memmove_extent_buffer(buf, btrfs_item_nr_offset(slot),
3513 btrfs_item_nr_offset(slot + 1),
3514 sizeof(struct btrfs_item) *
3515 (nritems - slot - 1));
3516 btrfs_set_header_nritems(buf, nritems - 1);
3518 struct btrfs_disk_key disk_key;
3520 btrfs_item_key(buf, &disk_key, 0);
3521 btrfs_fixup_low_keys(root, path, &disk_key, 1);
3523 btrfs_mark_buffer_dirty(buf);
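/*
 * Walk the leaf items and make sure each item's data ends where the previous
 * item's data begins (or at the end of the leaf data area for item 0);
 * overlapping or bogus items are deleted, misplaced items are shifted back
 * into position.
 */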
3527 static int fix_item_offset(struct btrfs_trans_handle *trans,
3528 struct btrfs_root *root,
3529 struct btrfs_path *path)
3531 struct extent_buffer *buf;
3535 /* We should only get this for leaves */
3536 BUG_ON(path->lowest_level);
3537 buf = path->nodes[0];
3539 for (i = 0; i < btrfs_header_nritems(buf); i++) {
3540 unsigned int shift = 0, offset;
3542 if (i == 0 && btrfs_item_end_nr(buf, i) !=
3543 BTRFS_LEAF_DATA_SIZE(root)) {
3544 if (btrfs_item_end_nr(buf, i) >
3545 BTRFS_LEAF_DATA_SIZE(root)) {
3546 ret = delete_bogus_item(trans, root, path,
3550 fprintf(stderr, "item is off the end of the "
3551 "leaf, can't fix\n");
3555 shift = BTRFS_LEAF_DATA_SIZE(root) -
3556 btrfs_item_end_nr(buf, i);
3557 } else if (i > 0 && btrfs_item_end_nr(buf, i) !=
3558 btrfs_item_offset_nr(buf, i - 1)) {
3559 if (btrfs_item_end_nr(buf, i) >
3560 btrfs_item_offset_nr(buf, i - 1)) {
3561 ret = delete_bogus_item(trans, root, path,
3565 fprintf(stderr, "items overlap, can't fix\n");
3569 shift = btrfs_item_offset_nr(buf, i - 1) -
3570 btrfs_item_end_nr(buf, i);
3575 printf("Shifting item nr %d by %u bytes in block %llu\n",
3576 i, shift, (unsigned long long)buf->start);
3577 offset = btrfs_item_offset_nr(buf, i);
3578 memmove_extent_buffer(buf,
3579 btrfs_leaf_data(buf) + offset + shift,
3580 btrfs_leaf_data(buf) + offset,
3581 btrfs_item_size_nr(buf, i));
3582 btrfs_set_item_offset(buf, btrfs_item_nr(i),
3584 btrfs_mark_buffer_dirty(buf);
3588 * We may have moved things, in which case we want to exit so we don't
3589 * write those changes out. Once we have proper abort functionality in
3590 * progs this can be changed to something nicer.
3597 * Attempt to fix basic block failures. If we can't fix it for whatever reason
3598 * then just return -EIO.
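 * Only BTRFS_TREE_BLOCK_BAD_KEY_ORDER and BTRFS_TREE_BLOCK_INVALID_OFFSETS
 * are handled here; the fix is attempted in every root that references the
 * block, as found by btrfs_find_all_roots().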
3600 static int try_to_fix_bad_block(struct btrfs_trans_handle *trans,
3601 struct btrfs_root *root,
3602 struct extent_buffer *buf,
3603 enum btrfs_tree_block_status status)
3605 struct ulist *roots;
3606 struct ulist_node *node;
3607 struct btrfs_root *search_root;
3608 struct btrfs_path *path;
3609 struct ulist_iterator iter;
3610 struct btrfs_key root_key, key;
3613 if (status != BTRFS_TREE_BLOCK_BAD_KEY_ORDER &&
3614 status != BTRFS_TREE_BLOCK_INVALID_OFFSETS)
3617 path = btrfs_alloc_path();
3621 ret = btrfs_find_all_roots(trans, root->fs_info, buf->start,
3624 btrfs_free_path(path);
3628 ULIST_ITER_INIT(&iter);
3629 while ((node = ulist_next(roots, &iter))) {
3630 root_key.objectid = node->val;
3631 root_key.type = BTRFS_ROOT_ITEM_KEY;
3632 root_key.offset = (u64)-1;
3634 search_root = btrfs_read_fs_root(root->fs_info, &root_key);
3640 record_root_in_trans(trans, search_root);
3642 path->lowest_level = btrfs_header_level(buf);
3643 path->skip_check_block = 1;
3644 if (path->lowest_level)
3645 btrfs_node_key_to_cpu(buf, &key, 0);
3647 btrfs_item_key_to_cpu(buf, &key, 0);
3648 ret = btrfs_search_slot(trans, search_root, &key, path, 0, 1);
3653 if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
3654 ret = fix_key_order(trans, search_root, path);
3655 else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
3656 ret = fix_item_offset(trans, search_root, path);
3659 btrfs_release_path(path);
3662 btrfs_free_path(path);
3666 static int check_block(struct btrfs_trans_handle *trans,
3667 struct btrfs_root *root,
3668 struct cache_tree *extent_cache,
3669 struct extent_buffer *buf, u64 flags)
3671 struct extent_record *rec;
3672 struct cache_extent *cache;
3673 struct btrfs_key key;
3674 enum btrfs_tree_block_status status;
3678 cache = lookup_cache_extent(extent_cache, buf->start, buf->len);
3681 rec = container_of(cache, struct extent_record, cache);
3682 rec->generation = btrfs_header_generation(buf);
3684 level = btrfs_header_level(buf);
3685 if (btrfs_header_nritems(buf) > 0) {
3688 btrfs_item_key_to_cpu(buf, &key, 0);
3690 btrfs_node_key_to_cpu(buf, &key, 0);
3692 rec->info_objectid = key.objectid;
3694 rec->info_level = level;
3696 if (btrfs_is_leaf(buf))
3697 status = btrfs_check_leaf(root, &rec->parent_key, buf);
3699 status = btrfs_check_node(root, &rec->parent_key, buf);
3701 if (status != BTRFS_TREE_BLOCK_CLEAN) {
3703 status = try_to_fix_bad_block(trans, root, buf,
3705 if (status != BTRFS_TREE_BLOCK_CLEAN) {
3707 fprintf(stderr, "bad block %llu\n",
3708 (unsigned long long)buf->start);
3711 * Signal to callers we need to start the scan over
3712 * again since we'll have cow'ed blocks.
3717 rec->content_checked = 1;
3718 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3719 rec->owner_ref_checked = 1;
3721 ret = check_owner_ref(root, rec, buf);
3723 rec->owner_ref_checked = 1;
3727 maybe_free_extent_rec(extent_cache, rec);
3731 static struct tree_backref *find_tree_backref(struct extent_record *rec,
3732 u64 parent, u64 root)
3734 struct list_head *cur = rec->backrefs.next;
3735 struct extent_backref *node;
3736 struct tree_backref *back;
3738 while(cur != &rec->backrefs) {
3739 node = list_entry(cur, struct extent_backref, list);
3743 back = (struct tree_backref *)node;
3745 if (!node->full_backref)
3747 if (parent == back->parent)
3750 if (node->full_backref)
3752 if (back->root == root)
3759 static struct tree_backref *alloc_tree_backref(struct extent_record *rec,
3760 u64 parent, u64 root)
3762 struct tree_backref *ref = malloc(sizeof(*ref));
3763 memset(&ref->node, 0, sizeof(ref->node));
3765 ref->parent = parent;
3766 ref->node.full_backref = 1;
3769 ref->node.full_backref = 0;
3771 list_add_tail(&ref->node.list, &rec->backrefs);
3776 static struct data_backref *find_data_backref(struct extent_record *rec,
3777 u64 parent, u64 root,
3778 u64 owner, u64 offset,
3780 u64 disk_bytenr, u64 bytes)
3782 struct list_head *cur = rec->backrefs.next;
3783 struct extent_backref *node;
3784 struct data_backref *back;
3786 while(cur != &rec->backrefs) {
3787 node = list_entry(cur, struct extent_backref, list);
3791 back = (struct data_backref *)node;
3793 if (!node->full_backref)
3795 if (parent == back->parent)
3798 if (node->full_backref)
3800 if (back->root == root && back->owner == owner &&
3801 back->offset == offset) {
3802 if (found_ref && node->found_ref &&
3803 (back->bytes != bytes ||
3804 back->disk_bytenr != disk_bytenr))
3813 static struct data_backref *alloc_data_backref(struct extent_record *rec,
3814 u64 parent, u64 root,
3815 u64 owner, u64 offset,
3818 struct data_backref *ref = malloc(sizeof(*ref));
3819 memset(&ref->node, 0, sizeof(ref->node));
3820 ref->node.is_data = 1;
3823 ref->parent = parent;
3826 ref->node.full_backref = 1;
3830 ref->offset = offset;
3831 ref->node.full_backref = 0;
3833 ref->bytes = max_size;
3836 list_add_tail(&ref->node.list, &rec->backrefs);
3837 if (max_size > rec->max_size)
3838 rec->max_size = max_size;
3842 static int add_extent_rec(struct cache_tree *extent_cache,
3843 struct btrfs_key *parent_key, u64 parent_gen,
3844 u64 start, u64 nr, u64 extent_item_refs,
3845 int is_root, int inc_ref, int set_checked,
3846 int metadata, int extent_rec, u64 max_size)
3848 struct extent_record *rec;
3849 struct cache_extent *cache;
3853 cache = lookup_cache_extent(extent_cache, start, nr);
3855 rec = container_of(cache, struct extent_record, cache);
3859 rec->nr = max(nr, max_size);
3862 * We need to make sure to reset nr to whatever the extent
3863		 * record says was the real size; this way we can compare it to
3867 if (start != rec->start || rec->found_rec) {
3868 struct extent_record *tmp;
3871 if (list_empty(&rec->list))
3872 list_add_tail(&rec->list,
3873 &duplicate_extents);
3876 * We have to do this song and dance in case we
3877 * find an extent record that falls inside of
3878 * our current extent record but does not have
3879 * the same objectid.
3881 tmp = malloc(sizeof(*tmp));
3885 tmp->max_size = max_size;
3888 tmp->metadata = metadata;
3889 tmp->extent_item_refs = extent_item_refs;
3890 INIT_LIST_HEAD(&tmp->list);
3891 list_add_tail(&tmp->list, &rec->dups);
3892 rec->num_duplicates++;
3899 if (extent_item_refs && !dup) {
3900 if (rec->extent_item_refs) {
3901 fprintf(stderr, "block %llu rec "
3902 "extent_item_refs %llu, passed %llu\n",
3903 (unsigned long long)start,
3904 (unsigned long long)
3905 rec->extent_item_refs,
3906 (unsigned long long)extent_item_refs);
3908 rec->extent_item_refs = extent_item_refs;
3913 rec->content_checked = 1;
3914 rec->owner_ref_checked = 1;
3918 btrfs_cpu_key_to_disk(&rec->parent_key, parent_key);
3920 rec->parent_generation = parent_gen;
3922 if (rec->max_size < max_size)
3923 rec->max_size = max_size;
3925 maybe_free_extent_rec(extent_cache, rec);
3928 rec = malloc(sizeof(*rec));
3930 rec->max_size = max_size;
3931 rec->nr = max(nr, max_size);
3932 rec->found_rec = !!extent_rec;
3933 rec->content_checked = 0;
3934 rec->owner_ref_checked = 0;
3935 rec->num_duplicates = 0;
3936 rec->metadata = metadata;
3937 INIT_LIST_HEAD(&rec->backrefs);
3938 INIT_LIST_HEAD(&rec->dups);
3939 INIT_LIST_HEAD(&rec->list);
3951 if (extent_item_refs)
3952 rec->extent_item_refs = extent_item_refs;
3954 rec->extent_item_refs = 0;
3957 btrfs_cpu_key_to_disk(&rec->parent_key, parent_key);
3959 memset(&rec->parent_key, 0, sizeof(*parent_key));
3962 rec->parent_generation = parent_gen;
3964 rec->parent_generation = 0;
3966 rec->cache.start = start;
3967 rec->cache.size = nr;
3968 ret = insert_cache_extent(extent_cache, &rec->cache);
3972 rec->content_checked = 1;
3973 rec->owner_ref_checked = 1;
3978 static int add_tree_backref(struct cache_tree *extent_cache, u64 bytenr,
3979 u64 parent, u64 root, int found_ref)
3981 struct extent_record *rec;
3982 struct tree_backref *back;
3983 struct cache_extent *cache;
3985 cache = lookup_cache_extent(extent_cache, bytenr, 1);
3987 add_extent_rec(extent_cache, NULL, 0, bytenr,
3988 1, 0, 0, 0, 0, 1, 0, 0);
3989 cache = lookup_cache_extent(extent_cache, bytenr, 1);
3994 rec = container_of(cache, struct extent_record, cache);
3995 if (rec->start != bytenr) {
3999 back = find_tree_backref(rec, parent, root);
4001 back = alloc_tree_backref(rec, parent, root);
4004 if (back->node.found_ref) {
4005 fprintf(stderr, "Extent back ref already exists "
4006 "for %llu parent %llu root %llu \n",
4007 (unsigned long long)bytenr,
4008 (unsigned long long)parent,
4009 (unsigned long long)root);
4011 back->node.found_ref = 1;
4013 if (back->node.found_extent_tree) {
4014 fprintf(stderr, "Extent back ref already exists "
4015 "for %llu parent %llu root %llu \n",
4016 (unsigned long long)bytenr,
4017 (unsigned long long)parent,
4018 (unsigned long long)root);
4020 back->node.found_extent_tree = 1;
4022 maybe_free_extent_rec(extent_cache, rec);
4026 static int add_data_backref(struct cache_tree *extent_cache, u64 bytenr,
4027 u64 parent, u64 root, u64 owner, u64 offset,
4028 u32 num_refs, int found_ref, u64 max_size)
4030 struct extent_record *rec;
4031 struct data_backref *back;
4032 struct cache_extent *cache;
4034 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4036 add_extent_rec(extent_cache, NULL, 0, bytenr, 1, 0, 0, 0, 0,
4038 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4043 rec = container_of(cache, struct extent_record, cache);
4044 if (rec->max_size < max_size)
4045 rec->max_size = max_size;
4048 * If found_ref is set then max_size is the real size and must match the
4049 * existing refs. So if we have already found a ref then we need to
4050 * make sure that this ref matches the existing one, otherwise we need
4051 * to add a new backref so we can notice that the backrefs don't match
4052 * and we need to figure out who is telling the truth. This is to
4053 * account for that awful fsync bug I introduced where we'd end up with
4054 * a btrfs_file_extent_item that would have its length include multiple
4055 * prealloc extents or point inside of a prealloc extent.
4057 back = find_data_backref(rec, parent, root, owner, offset, found_ref,
4060 back = alloc_data_backref(rec, parent, root, owner, offset,
4064 BUG_ON(num_refs != 1);
4065 if (back->node.found_ref)
4066 BUG_ON(back->bytes != max_size);
4067 back->node.found_ref = 1;
4068 back->found_ref += 1;
4069 back->bytes = max_size;
4070 back->disk_bytenr = bytenr;
4072 rec->content_checked = 1;
4073 rec->owner_ref_checked = 1;
4075 if (back->node.found_extent_tree) {
4076 fprintf(stderr, "Extent back ref already exists "
4077 "for %llu parent %llu root %llu "
4078 "owner %llu offset %llu num_refs %lu\n",
4079 (unsigned long long)bytenr,
4080 (unsigned long long)parent,
4081 (unsigned long long)root,
4082 (unsigned long long)owner,
4083 (unsigned long long)offset,
4084 (unsigned long)num_refs);
4086 back->num_refs = num_refs;
4087 back->node.found_extent_tree = 1;
4089 maybe_free_extent_rec(extent_cache, rec);
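/*
 * Track the range in 'seen' before queueing it in 'pending', so the same
 * bytenr is not processed more than once.
 */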
4093 static int add_pending(struct cache_tree *pending,
4094 struct cache_tree *seen, u64 bytenr, u32 size)
4097 ret = add_cache_extent(seen, bytenr, size);
4100 add_cache_extent(pending, bytenr, size);
4104 static int pick_next_pending(struct cache_tree *pending,
4105 struct cache_tree *reada,
4106 struct cache_tree *nodes,
4107 u64 last, struct block_info *bits, int bits_nr,
4110 unsigned long node_start = last;
4111 struct cache_extent *cache;
4114 cache = search_cache_extent(reada, 0);
4116 bits[0].start = cache->start;
4117 bits[0].size = cache->size;
4122 if (node_start > 32768)
4123 node_start -= 32768;
4125 cache = search_cache_extent(nodes, node_start);
4127 cache = search_cache_extent(nodes, 0);
4130 cache = search_cache_extent(pending, 0);
4135 bits[ret].start = cache->start;
4136 bits[ret].size = cache->size;
4137 cache = next_cache_extent(cache);
4139 } while (cache && ret < bits_nr);
4145 bits[ret].start = cache->start;
4146 bits[ret].size = cache->size;
4147 cache = next_cache_extent(cache);
4149 } while (cache && ret < bits_nr);
4151 if (bits_nr - ret > 8) {
4152 u64 lookup = bits[0].start + bits[0].size;
4153 struct cache_extent *next;
4154 next = search_cache_extent(pending, lookup);
4156 if (next->start - lookup > 32768)
4158 bits[ret].start = next->start;
4159 bits[ret].size = next->size;
4160 lookup = next->start + next->size;
4164 next = next_cache_extent(next);
4172 static void free_chunk_record(struct cache_extent *cache)
4174 struct chunk_record *rec;
4176 rec = container_of(cache, struct chunk_record, cache);
4177 list_del_init(&rec->list);
4178 list_del_init(&rec->dextents);
4182 void free_chunk_cache_tree(struct cache_tree *chunk_cache)
4184 cache_tree_free_extents(chunk_cache, free_chunk_record);
4187 static void free_device_record(struct rb_node *node)
4189 struct device_record *rec;
4191 rec = container_of(node, struct device_record, node);
4195 FREE_RB_BASED_TREE(device_cache, free_device_record);
4197 int insert_block_group_record(struct block_group_tree *tree,
4198 struct block_group_record *bg_rec)
4202 ret = insert_cache_extent(&tree->tree, &bg_rec->cache);
4206 list_add_tail(&bg_rec->list, &tree->block_groups);
4210 static void free_block_group_record(struct cache_extent *cache)
4212 struct block_group_record *rec;
4214 rec = container_of(cache, struct block_group_record, cache);
4215 list_del_init(&rec->list);
4219 void free_block_group_tree(struct block_group_tree *tree)
4221 cache_tree_free_extents(&tree->tree, free_block_group_record);
4224 int insert_device_extent_record(struct device_extent_tree *tree,
4225 struct device_extent_record *de_rec)
4230	 * Device extents are a bit different from the other extents, because
4231	 * extents that belong to different devices may have the
4232	 * same start and size, so we need to use the special extent cache
4233	 * search/insert functions.
4235 ret = insert_cache_extent2(&tree->tree, &de_rec->cache);
4239 list_add_tail(&de_rec->chunk_list, &tree->no_chunk_orphans);
4240 list_add_tail(&de_rec->device_list, &tree->no_device_orphans);
4244 static void free_device_extent_record(struct cache_extent *cache)
4246 struct device_extent_record *rec;
4248 rec = container_of(cache, struct device_extent_record, cache);
4249 if (!list_empty(&rec->chunk_list))
4250 list_del_init(&rec->chunk_list);
4251 if (!list_empty(&rec->device_list))
4252 list_del_init(&rec->device_list);
4256 void free_device_extent_tree(struct device_extent_tree *tree)
4258 cache_tree_free_extents(&tree->tree, free_device_extent_record);
4261 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4262 static int process_extent_ref_v0(struct cache_tree *extent_cache,
4263 struct extent_buffer *leaf, int slot)
4265 struct btrfs_extent_ref_v0 *ref0;
4266 struct btrfs_key key;
4268 btrfs_item_key_to_cpu(leaf, &key, slot);
4269 ref0 = btrfs_item_ptr(leaf, slot, struct btrfs_extent_ref_v0);
4270 if (btrfs_ref_objectid_v0(leaf, ref0) < BTRFS_FIRST_FREE_OBJECTID) {
4271 add_tree_backref(extent_cache, key.objectid, key.offset, 0, 0);
4273 add_data_backref(extent_cache, key.objectid, key.offset, 0,
4274 0, 0, btrfs_ref_count_v0(leaf, ref0), 0, 0);
4280 struct chunk_record *btrfs_new_chunk_record(struct extent_buffer *leaf,
4281 struct btrfs_key *key,
4284 struct btrfs_chunk *ptr;
4285 struct chunk_record *rec;
4288 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4289 num_stripes = btrfs_chunk_num_stripes(leaf, ptr);
4291 rec = malloc(btrfs_chunk_record_size(num_stripes));
4293 fprintf(stderr, "memory allocation failed\n");
4297 memset(rec, 0, btrfs_chunk_record_size(num_stripes));
4299 INIT_LIST_HEAD(&rec->list);
4300 INIT_LIST_HEAD(&rec->dextents);
4303 rec->cache.start = key->offset;
4304 rec->cache.size = btrfs_chunk_length(leaf, ptr);
4306 rec->generation = btrfs_header_generation(leaf);
4308 rec->objectid = key->objectid;
4309 rec->type = key->type;
4310 rec->offset = key->offset;
4312 rec->length = rec->cache.size;
4313 rec->owner = btrfs_chunk_owner(leaf, ptr);
4314 rec->stripe_len = btrfs_chunk_stripe_len(leaf, ptr);
4315 rec->type_flags = btrfs_chunk_type(leaf, ptr);
4316 rec->io_width = btrfs_chunk_io_width(leaf, ptr);
4317 rec->io_align = btrfs_chunk_io_align(leaf, ptr);
4318 rec->sector_size = btrfs_chunk_sector_size(leaf, ptr);
4319 rec->num_stripes = num_stripes;
4320 rec->sub_stripes = btrfs_chunk_sub_stripes(leaf, ptr);
4322 for (i = 0; i < rec->num_stripes; ++i) {
4323 rec->stripes[i].devid =
4324 btrfs_stripe_devid_nr(leaf, ptr, i);
4325 rec->stripes[i].offset =
4326 btrfs_stripe_offset_nr(leaf, ptr, i);
4327 read_extent_buffer(leaf, rec->stripes[i].dev_uuid,
4328 (unsigned long)btrfs_stripe_dev_uuid_nr(ptr, i),
4335 static int process_chunk_item(struct cache_tree *chunk_cache,
4336 struct btrfs_key *key, struct extent_buffer *eb,
4339 struct chunk_record *rec;
4342 rec = btrfs_new_chunk_record(eb, key, slot);
4343 ret = insert_cache_extent(chunk_cache, &rec->cache);
4345 fprintf(stderr, "Chunk[%llu, %llu] existed.\n",
4346 rec->offset, rec->length);
4353 static int process_device_item(struct rb_root *dev_cache,
4354 struct btrfs_key *key, struct extent_buffer *eb, int slot)
4356 struct btrfs_dev_item *ptr;
4357 struct device_record *rec;
4360 ptr = btrfs_item_ptr(eb,
4361 slot, struct btrfs_dev_item);
4363 rec = malloc(sizeof(*rec));
4365 fprintf(stderr, "memory allocation failed\n");
4369 rec->devid = key->offset;
4370 rec->generation = btrfs_header_generation(eb);
4372 rec->objectid = key->objectid;
4373 rec->type = key->type;
4374 rec->offset = key->offset;
4376 rec->devid = btrfs_device_id(eb, ptr);
4377 rec->total_byte = btrfs_device_total_bytes(eb, ptr);
4378 rec->byte_used = btrfs_device_bytes_used(eb, ptr);
4380 ret = rb_insert(dev_cache, &rec->node, device_record_compare);
4382 fprintf(stderr, "Device[%llu] existed.\n", rec->devid);
4389 struct block_group_record *
4390 btrfs_new_block_group_record(struct extent_buffer *leaf, struct btrfs_key *key,
4393 struct btrfs_block_group_item *ptr;
4394 struct block_group_record *rec;
4396 rec = malloc(sizeof(*rec));
4398 fprintf(stderr, "memory allocation failed\n");
4401 memset(rec, 0, sizeof(*rec));
4403 rec->cache.start = key->objectid;
4404 rec->cache.size = key->offset;
4406 rec->generation = btrfs_header_generation(leaf);
4408 rec->objectid = key->objectid;
4409 rec->type = key->type;
4410 rec->offset = key->offset;
4412 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_block_group_item);
4413 rec->flags = btrfs_disk_block_group_flags(leaf, ptr);
4415 INIT_LIST_HEAD(&rec->list);
4420 static int process_block_group_item(struct block_group_tree *block_group_cache,
4421 struct btrfs_key *key,
4422 struct extent_buffer *eb, int slot)
4424 struct block_group_record *rec;
4427 rec = btrfs_new_block_group_record(eb, key, slot);
4428 ret = insert_block_group_record(block_group_cache, rec);
4430 fprintf(stderr, "Block Group[%llu, %llu] existed.\n",
4431 rec->objectid, rec->offset);
4438 struct device_extent_record *
4439 btrfs_new_device_extent_record(struct extent_buffer *leaf,
4440 struct btrfs_key *key, int slot)
4442 struct device_extent_record *rec;
4443 struct btrfs_dev_extent *ptr;
4445 rec = malloc(sizeof(*rec));
4447 fprintf(stderr, "memory allocation failed\n");
4450 memset(rec, 0, sizeof(*rec));
4452 rec->cache.objectid = key->objectid;
4453 rec->cache.start = key->offset;
4455 rec->generation = btrfs_header_generation(leaf);
4457 rec->objectid = key->objectid;
4458 rec->type = key->type;
4459 rec->offset = key->offset;
4461 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
4462 rec->chunk_objecteid =
4463 btrfs_dev_extent_chunk_objectid(leaf, ptr);
4465 btrfs_dev_extent_chunk_offset(leaf, ptr);
4466 rec->length = btrfs_dev_extent_length(leaf, ptr);
4467 rec->cache.size = rec->length;
4469 INIT_LIST_HEAD(&rec->chunk_list);
4470 INIT_LIST_HEAD(&rec->device_list);
4476 process_device_extent_item(struct device_extent_tree *dev_extent_cache,
4477 struct btrfs_key *key, struct extent_buffer *eb,
4480 struct device_extent_record *rec;
4483 rec = btrfs_new_device_extent_record(eb, key, slot);
4484 ret = insert_device_extent_record(dev_extent_cache, rec);
4487 "Device extent[%llu, %llu, %llu] existed.\n",
4488 rec->objectid, rec->offset, rec->length);
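/*
 * Record an EXTENT_ITEM / METADATA_ITEM in the extent cache and then walk its
 * inline references, adding a tree or data backref for each one.
 */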
4495 static int process_extent_item(struct btrfs_root *root,
4496 struct cache_tree *extent_cache,
4497 struct extent_buffer *eb, int slot)
4499 struct btrfs_extent_item *ei;
4500 struct btrfs_extent_inline_ref *iref;
4501 struct btrfs_extent_data_ref *dref;
4502 struct btrfs_shared_data_ref *sref;
4503 struct btrfs_key key;
4507 u32 item_size = btrfs_item_size_nr(eb, slot);
4513 btrfs_item_key_to_cpu(eb, &key, slot);
4515 if (key.type == BTRFS_METADATA_ITEM_KEY) {
4517 num_bytes = root->leafsize;
4519 num_bytes = key.offset;
4522 if (item_size < sizeof(*ei)) {
4523 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4524 struct btrfs_extent_item_v0 *ei0;
4525 BUG_ON(item_size != sizeof(*ei0));
4526 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
4527 refs = btrfs_extent_refs_v0(eb, ei0);
4531 return add_extent_rec(extent_cache, NULL, 0, key.objectid,
4532 num_bytes, refs, 0, 0, 0, metadata, 1,
4536 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
4537 refs = btrfs_extent_refs(eb, ei);
4539 add_extent_rec(extent_cache, NULL, 0, key.objectid, num_bytes,
4540 refs, 0, 0, 0, metadata, 1, num_bytes);
4542 ptr = (unsigned long)(ei + 1);
4543 if (btrfs_extent_flags(eb, ei) & BTRFS_EXTENT_FLAG_TREE_BLOCK &&
4544 key.type == BTRFS_EXTENT_ITEM_KEY)
4545 ptr += sizeof(struct btrfs_tree_block_info);
4547 end = (unsigned long)ei + item_size;
4549 iref = (struct btrfs_extent_inline_ref *)ptr;
4550 type = btrfs_extent_inline_ref_type(eb, iref);
4551 offset = btrfs_extent_inline_ref_offset(eb, iref);
4553 case BTRFS_TREE_BLOCK_REF_KEY:
4554 add_tree_backref(extent_cache, key.objectid,
4557 case BTRFS_SHARED_BLOCK_REF_KEY:
4558 add_tree_backref(extent_cache, key.objectid,
4561 case BTRFS_EXTENT_DATA_REF_KEY:
4562 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
4563 add_data_backref(extent_cache, key.objectid, 0,
4564 btrfs_extent_data_ref_root(eb, dref),
4565 btrfs_extent_data_ref_objectid(eb,
4567 btrfs_extent_data_ref_offset(eb, dref),
4568 btrfs_extent_data_ref_count(eb, dref),
4571 case BTRFS_SHARED_DATA_REF_KEY:
4572 sref = (struct btrfs_shared_data_ref *)(iref + 1);
4573 add_data_backref(extent_cache, key.objectid, offset,
4575 btrfs_shared_data_ref_count(eb, sref),
4579 fprintf(stderr, "corrupt extent record: key %Lu %u %Lu\n",
4580 key.objectid, key.type, num_bytes);
4583 ptr += btrfs_extent_inline_ref_size(type);
4590 static int check_cache_range(struct btrfs_root *root,
4591 struct btrfs_block_group_cache *cache,
4592 u64 offset, u64 bytes)
4594 struct btrfs_free_space *entry;
4600 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
4601 bytenr = btrfs_sb_offset(i);
4602 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
4603 cache->key.objectid, bytenr, 0,
4604 &logical, &nr, &stripe_len);
4609 if (logical[nr] + stripe_len <= offset)
4611 if (offset + bytes <= logical[nr])
4613 if (logical[nr] == offset) {
4614 if (stripe_len >= bytes) {
4618 bytes -= stripe_len;
4619 offset += stripe_len;
4620 } else if (logical[nr] < offset) {
4621 if (logical[nr] + stripe_len >=
4626 bytes = (offset + bytes) -
4627 (logical[nr] + stripe_len);
4628 offset = logical[nr] + stripe_len;
4631			 * This could be tricky: the super may land in the
4632			 * middle of the area we're checking. First
4633			 * check the easiest case: it's at the end.
4635 if (logical[nr] + stripe_len >=
4637 bytes = logical[nr] - offset;
4641 /* Check the left side */
4642 ret = check_cache_range(root, cache,
4644 logical[nr] - offset);
4650 /* Now we continue with the right side */
4651 bytes = (offset + bytes) -
4652 (logical[nr] + stripe_len);
4653 offset = logical[nr] + stripe_len;
4660 entry = btrfs_find_free_space(cache->free_space_ctl, offset, bytes);
4662 fprintf(stderr, "There is no free space entry for %Lu-%Lu\n",
4663 offset, offset+bytes);
4667 if (entry->offset != offset) {
4668 fprintf(stderr, "Wanted offset %Lu, found %Lu\n", offset,
4673 if (entry->bytes != bytes) {
4674 fprintf(stderr, "Wanted bytes %Lu, found %Lu for off %Lu\n",
4675 bytes, entry->bytes, offset);
4679 unlink_free_space(cache->free_space_ctl, entry);
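	/*
	 * Every range verified against the extent tree removes its free space
	 * entry; anything still left in the ctl after verify_space_cache()
	 * finishes means the space cache and the extent tree disagree.
	 */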
4684 static int verify_space_cache(struct btrfs_root *root,
4685 struct btrfs_block_group_cache *cache)
4687 struct btrfs_path *path;
4688 struct extent_buffer *leaf;
4689 struct btrfs_key key;
4693 path = btrfs_alloc_path();
4697 root = root->fs_info->extent_root;
4699 last = max_t(u64, cache->key.objectid, BTRFS_SUPER_INFO_OFFSET);
4701 key.objectid = last;
4703 key.type = BTRFS_EXTENT_ITEM_KEY;
4705 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4710 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4711 ret = btrfs_next_leaf(root, path);
4719 leaf = path->nodes[0];
4720 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4721 if (key.objectid >= cache->key.offset + cache->key.objectid)
4723 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
4724 key.type != BTRFS_METADATA_ITEM_KEY) {
4729 if (last == key.objectid) {
4730 if (key.type == BTRFS_EXTENT_ITEM_KEY)
4731 last = key.objectid + key.offset;
4733 last = key.objectid + root->leafsize;
4738 ret = check_cache_range(root, cache, last,
4739 key.objectid - last);
4742 if (key.type == BTRFS_EXTENT_ITEM_KEY)
4743 last = key.objectid + key.offset;
4745 last = key.objectid + root->leafsize;
4749 if (last < cache->key.objectid + cache->key.offset)
4750 ret = check_cache_range(root, cache, last,
4751 cache->key.objectid +
4752 cache->key.offset - last);
4755 btrfs_free_path(path);
4758 !RB_EMPTY_ROOT(&cache->free_space_ctl->free_space_offset)) {
4759 fprintf(stderr, "There are still entries left in the space "
4767 static int check_space_cache(struct btrfs_root *root)
4769 struct btrfs_block_group_cache *cache;
4770 u64 start = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
4774 if (btrfs_super_cache_generation(root->fs_info->super_copy) != -1ULL &&
4775 btrfs_super_generation(root->fs_info->super_copy) !=
4776 btrfs_super_cache_generation(root->fs_info->super_copy)) {
4777 printf("cache and super generation don't match, space cache "
4778 "will be invalidated\n");
4783 cache = btrfs_lookup_first_block_group(root->fs_info, start);
4787 start = cache->key.objectid + cache->key.offset;
4788 if (!cache->free_space_ctl) {
4789 if (btrfs_init_free_space_ctl(cache,
4790 root->sectorsize)) {
4795 btrfs_remove_free_space_cache(cache);
4798 ret = load_free_space_cache(root->fs_info, cache);
4802 ret = verify_space_cache(root, cache);
4804 fprintf(stderr, "cache appears valid but isnt %Lu\n",
4805 cache->key.objectid);
4810 return error ? -EINVAL : 0;
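/*
 * Resolve 'logical' to a physical location for the requested mirror with
 * btrfs_map_block() and pread() the data from that device.
 */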
4813 static int read_extent_data(struct btrfs_root *root, char *data,
4814 u64 logical, u64 *len, int mirror)
4817 struct btrfs_multi_bio *multi = NULL;
4818 struct btrfs_fs_info *info = root->fs_info;
4819 struct btrfs_device *device;
4823 ret = btrfs_map_block(&info->mapping_tree, READ, logical, len,
4824 &multi, mirror, NULL);
4826 fprintf(stderr, "Couldn't map the block %llu\n",
4830 device = multi->stripes[0].dev;
4832 if (device->fd == 0)
4837 ret = pread64(device->fd, data, *len, multi->stripes[0].physical);
4847 static int check_extent_csums(struct btrfs_root *root, u64 bytenr,
4848 u64 num_bytes, unsigned long leaf_offset,
4849 struct extent_buffer *eb) {
4852 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
4854 unsigned long csum_offset;
4858 u64 data_checked = 0;
4864 if (num_bytes % root->sectorsize)
4867 data = malloc(num_bytes);
4871 while (offset < num_bytes) {
4874 read_len = num_bytes - offset;
4875		/* read as much data as possible in one go */
4876 ret = read_extent_data(root, data + offset,
4877 bytenr + offset, &read_len, mirror);
4881		/* verify the checksum of each sectorsize block of data */
4882 while (data_checked < read_len) {
4884 tmp = offset + data_checked;
4886 csum = btrfs_csum_data(NULL, (char *)data + tmp,
4887 csum, root->sectorsize);
4888 btrfs_csum_final(csum, (char *)&csum);
4890 csum_offset = leaf_offset +
4891 tmp / root->sectorsize * csum_size;
4892 read_extent_buffer(eb, (char *)&csum_expected,
4893 csum_offset, csum_size);
4894 /* try another mirror */
4895 if (csum != csum_expected) {
4896 fprintf(stderr, "mirror %d bytenr %llu csum %u expected csum %u\n",
4897 mirror, bytenr + tmp,
4898 csum, csum_expected);
4899 num_copies = btrfs_num_copies(
4900 &root->fs_info->mapping_tree,
4902 if (mirror < num_copies - 1) {
4907 data_checked += root->sectorsize;
4916 static int check_extent_exists(struct btrfs_root *root, u64 bytenr,
4919 struct btrfs_path *path;
4920 struct extent_buffer *leaf;
4921 struct btrfs_key key;
4924 path = btrfs_alloc_path();
4926 fprintf(stderr, "Error allocating path\n");
4930 key.objectid = bytenr;
4931 key.type = BTRFS_EXTENT_ITEM_KEY;
4932 key.offset = (u64)-1;
4935 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
4938 fprintf(stderr, "Error looking up extent record %d\n", ret);
4939 btrfs_free_path(path);
4942 if (path->slots[0] > 0) {
4945 ret = btrfs_prev_leaf(root, path);
4948 } else if (ret > 0) {
4955 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4958 * Block group items come before extent items if they have the same
4959 * bytenr, so walk back one more just in case. Dear future traveler,
4960 * first congrats on mastering time travel. Now if it's not too much
4961 * trouble could you go back to 2006 and tell Chris to make the
4962 * BLOCK_GROUP_ITEM_KEY (and BTRFS_*_REF_KEY) lower than the
4963 * EXTENT_ITEM_KEY please?
4965 while (key.type > BTRFS_EXTENT_ITEM_KEY) {
4966 if (path->slots[0] > 0) {
4969 ret = btrfs_prev_leaf(root, path);
4972 } else if (ret > 0) {
4977 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4981 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4982 ret = btrfs_next_leaf(root, path);
4984 fprintf(stderr, "Error going to next leaf "
4986 btrfs_free_path(path);
4992 leaf = path->nodes[0];
4993 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4994 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
4998 if (key.objectid + key.offset < bytenr) {
5002 if (key.objectid > bytenr + num_bytes)
5005 if (key.objectid == bytenr) {
5006 if (key.offset >= num_bytes) {
5010 num_bytes -= key.offset;
5011 bytenr += key.offset;
5012 } else if (key.objectid < bytenr) {
5013 if (key.objectid + key.offset >= bytenr + num_bytes) {
5017 num_bytes = (bytenr + num_bytes) -
5018 (key.objectid + key.offset);
5019 bytenr = key.objectid + key.offset;
5021 if (key.objectid + key.offset < bytenr + num_bytes) {
5022 u64 new_start = key.objectid + key.offset;
5023 u64 new_bytes = bytenr + num_bytes - new_start;
5026 * Weird case, the extent is in the middle of
5027 * our range, we'll have to search one side
5028 * and then the other. Not sure if this happens
5029 * in real life, but no harm in coding it up
5030 * anyway just in case.
5032 btrfs_release_path(path);
5033 ret = check_extent_exists(root, new_start,
5036 fprintf(stderr, "Right section didn't "
5040 num_bytes = key.objectid - bytenr;
5043 num_bytes = key.objectid - bytenr;
5050 if (num_bytes && !ret) {
5051 fprintf(stderr, "There are no extents for csum range "
5052 "%Lu-%Lu\n", bytenr, bytenr+num_bytes);
5056 btrfs_free_path(path);
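/*
 * Walk the csum tree.  For every contiguous run of csum items, optionally
 * verify the data checksums themselves (when check_data_csum is set) and
 * make sure a matching extent record exists via check_extent_exists().
 */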
5060 static int check_csums(struct btrfs_root *root)
5062 struct btrfs_path *path;
5063 struct extent_buffer *leaf;
5064 struct btrfs_key key;
5065 u64 offset = 0, num_bytes = 0;
5066 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
5070 unsigned long leaf_offset;
5072 root = root->fs_info->csum_root;
5073 if (!extent_buffer_uptodate(root->node)) {
5074 fprintf(stderr, "No valid csum tree found\n");
5078 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
5079 key.type = BTRFS_EXTENT_CSUM_KEY;
5082 path = btrfs_alloc_path();
5086 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5088 fprintf(stderr, "Error searching csum tree %d\n", ret);
5089 btrfs_free_path(path);
5093 if (ret > 0 && path->slots[0])
5098 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5099 ret = btrfs_next_leaf(root, path);
5101 fprintf(stderr, "Error going to next leaf "
5108 leaf = path->nodes[0];
5110 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5111 if (key.type != BTRFS_EXTENT_CSUM_KEY) {
5116 data_len = (btrfs_item_size_nr(leaf, path->slots[0]) /
5117 csum_size) * root->sectorsize;
5118 if (!check_data_csum)
5119 goto skip_csum_check;
5120 leaf_offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
5121 ret = check_extent_csums(root, key.offset, data_len,
5127 offset = key.offset;
5128 } else if (key.offset != offset + num_bytes) {
5129 ret = check_extent_exists(root, offset, num_bytes);
5131 fprintf(stderr, "Csum exists for %Lu-%Lu but "
5132 "there is no extent record\n",
5133 offset, offset+num_bytes);
5136 offset = key.offset;
5139 num_bytes += data_len;
5143 btrfs_free_path(path);
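/*
 * Return nonzero if *key sorts before *drop_key in btrfs key order, i.e.
 * the key falls in the part of a half-deleted snapshot that has already
 * been dropped (drop_key comes from the root item's drop_progress).
 */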
5147 static int is_dropped_key(struct btrfs_key *key,
5148 struct btrfs_key *drop_key) {
5149 if (key->objectid < drop_key->objectid)
5151 else if (key->objectid == drop_key->objectid) {
5152 if (key->type < drop_key->type)
5154 else if (key->type == drop_key->type) {
5155 if (key->offset < drop_key->offset)
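/*
 * Work out whether the tree block 'buf' should carry
 * BTRFS_BLOCK_FLAG_FULL_BACKREF: if every child extent still has a
 * backref from the original root 'ri' we keep normal backrefs, otherwise
 * we set the full backref flag on the corresponding extent record.
 */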
5162 static int calc_extent_flag(struct btrfs_root *root,
5163 struct cache_tree *extent_cache,
5164 struct extent_buffer *buf,
5165 struct root_item_record *ri,
5169 int nritems = btrfs_header_nritems(buf);
5170 struct btrfs_key key;
5171 struct extent_record *rec;
5172 struct cache_extent *cache;
5173 struct data_backref *dback;
5174 struct tree_backref *tback;
5175 struct extent_buffer *new_buf;
5185 * Except for file/reloc trees, we cannot have
5188 if (ri->objectid < BTRFS_FIRST_FREE_OBJECTID)
5193 if (buf->start == ri->bytenr)
5195 if (btrfs_is_leaf(buf)) {
5197 * we are searching from the original root (world
5198 * peace is achieved), so we use a normal backref.
5200 owner = btrfs_header_owner(buf);
5201 if (owner == ri->objectid)
5204 * we check every eb here; if any eb doesn't
5205 * have a ref from the original root pointing
5206 * to it, we set the full backref flag for
5207 * this extent, otherwise we use a normal backref.
5209 for (i = 0; i < nritems; i++) {
5210 struct btrfs_file_extent_item *fi;
5211 btrfs_item_key_to_cpu(buf, &key, i);
5213 if (key.type != BTRFS_EXTENT_DATA_KEY)
5215 fi = btrfs_item_ptr(buf, i,
5216 struct btrfs_file_extent_item);
5217 if (btrfs_file_extent_type(buf, fi) ==
5218 BTRFS_FILE_EXTENT_INLINE)
5220 if (btrfs_file_extent_disk_bytenr(buf, fi) == 0)
5222 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
5223 cache = lookup_cache_extent(extent_cache, bytenr, 1);
5226 offset = btrfs_file_extent_offset(buf, fi);
5227 rec = container_of(cache, struct extent_record, cache);
5228 dback = find_data_backref(rec, 0, ri->objectid, owner,
5229 key.offset - offset, 1, bytenr, bytenr);
5235 level = btrfs_header_level(buf);
5236 for (i = 0; i < nritems; i++) {
5237 ptr = btrfs_node_blockptr(buf, i);
5238 size = btrfs_level_size(root, level);
5240 new_buf = read_tree_block(root, ptr, size, 0);
5241 if (!extent_buffer_uptodate(new_buf)) {
5242 free_extent_buffer(new_buf);
5247 * we are searching from the original root (world
5248 * peace is achieved), so we use a normal backref.
5250 owner = btrfs_header_owner(new_buf);
5251 free_extent_buffer(new_buf);
5252 if (owner == ri->objectid)
5255 cache = lookup_cache_extent(extent_cache, ptr, size);
5258 rec = container_of(cache, struct extent_record, cache);
5259 tback = find_tree_backref(rec, 0, owner);
5267 cache = lookup_cache_extent(extent_cache, buf->start, 1);
5268 /* we have added this extent before */
5270 rec = container_of(cache, struct extent_record, cache);
5271 rec->flag_block_full_backref = 0;
5274 *flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5275 cache = lookup_cache_extent(extent_cache, buf->start, 1);
5276 /* we have added this extent before */
5278 rec = container_of(cache, struct extent_record, cache);
5279 rec->flag_block_full_backref = 1;
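/*
 * Process one pending tree block: read it, record every item of interest
 * (extent/metadata items, csums, chunks, devices, block groups, dev
 * extents and all backref types) into the caches, then queue its children.
 */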
5283 static int run_next_block(struct btrfs_trans_handle *trans,
5284 struct btrfs_root *root,
5285 struct block_info *bits,
5288 struct cache_tree *pending,
5289 struct cache_tree *seen,
5290 struct cache_tree *reada,
5291 struct cache_tree *nodes,
5292 struct cache_tree *extent_cache,
5293 struct cache_tree *chunk_cache,
5294 struct rb_root *dev_cache,
5295 struct block_group_tree *block_group_cache,
5296 struct device_extent_tree *dev_extent_cache,
5297 struct root_item_record *ri)
5299 struct extent_buffer *buf;
5310 struct btrfs_key key;
5311 struct cache_extent *cache;
5314 nritems = pick_next_pending(pending, reada, nodes, *last, bits,
5315 bits_nr, &reada_bits);
5320 for(i = 0; i < nritems; i++) {
5321 ret = add_cache_extent(reada, bits[i].start,
5326 /* fixme, get the parent transid */
5327 readahead_tree_block(root, bits[i].start,
5331 *last = bits[0].start;
5332 bytenr = bits[0].start;
5333 size = bits[0].size;
5335 cache = lookup_cache_extent(pending, bytenr, size);
5337 remove_cache_extent(pending, cache);
5340 cache = lookup_cache_extent(reada, bytenr, size);
5342 remove_cache_extent(reada, cache);
5345 cache = lookup_cache_extent(nodes, bytenr, size);
5347 remove_cache_extent(nodes, cache);
5350 cache = lookup_cache_extent(extent_cache, bytenr, size);
5352 struct extent_record *rec;
5354 rec = container_of(cache, struct extent_record, cache);
5355 gen = rec->parent_generation;
5358 /* fixme, get the real parent transid */
5359 buf = read_tree_block(root, bytenr, size, gen);
5360 if (!extent_buffer_uptodate(buf)) {
5361 record_bad_block_io(root->fs_info,
5362 extent_cache, bytenr, size);
5366 nritems = btrfs_header_nritems(buf);
5369 * FIXME, this only works if we don't have any full
5372 if (!init_extent_tree) {
5373 ret = btrfs_lookup_extent_info(NULL, root, bytenr,
5374 btrfs_header_level(buf), 1, NULL,
5380 ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
5385 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5390 owner = btrfs_header_owner(buf);
5393 ret = check_block(trans, root, extent_cache, buf, flags);
5397 if (btrfs_is_leaf(buf)) {
5398 btree_space_waste += btrfs_leaf_free_space(root, buf);
5399 for (i = 0; i < nritems; i++) {
5400 struct btrfs_file_extent_item *fi;
5401 btrfs_item_key_to_cpu(buf, &key, i);
5402 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
5403 process_extent_item(root, extent_cache, buf,
5407 if (key.type == BTRFS_METADATA_ITEM_KEY) {
5408 process_extent_item(root, extent_cache, buf,
5412 if (key.type == BTRFS_EXTENT_CSUM_KEY) {
5414 btrfs_item_size_nr(buf, i);
5417 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5418 process_chunk_item(chunk_cache, &key, buf, i);
5421 if (key.type == BTRFS_DEV_ITEM_KEY) {
5422 process_device_item(dev_cache, &key, buf, i);
5425 if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5426 process_block_group_item(block_group_cache,
5430 if (key.type == BTRFS_DEV_EXTENT_KEY) {
5431 process_device_extent_item(dev_extent_cache,
5436 if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
5437 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5438 process_extent_ref_v0(extent_cache, buf, i);
5445 if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
5446 add_tree_backref(extent_cache, key.objectid, 0,
5450 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
5451 add_tree_backref(extent_cache, key.objectid,
5455 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
5456 struct btrfs_extent_data_ref *ref;
5457 ref = btrfs_item_ptr(buf, i,
5458 struct btrfs_extent_data_ref);
5459 add_data_backref(extent_cache,
5461 btrfs_extent_data_ref_root(buf, ref),
5462 btrfs_extent_data_ref_objectid(buf,
5464 btrfs_extent_data_ref_offset(buf, ref),
5465 btrfs_extent_data_ref_count(buf, ref),
5466 0, root->sectorsize);
5469 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
5470 struct btrfs_shared_data_ref *ref;
5471 ref = btrfs_item_ptr(buf, i,
5472 struct btrfs_shared_data_ref);
5473 add_data_backref(extent_cache,
5474 key.objectid, key.offset, 0, 0, 0,
5475 btrfs_shared_data_ref_count(buf, ref),
5476 0, root->sectorsize);
5479 if (key.type == BTRFS_ORPHAN_ITEM_KEY) {
5480 struct bad_item *bad;
5482 if (key.objectid == BTRFS_ORPHAN_OBJECTID)
5486 bad = malloc(sizeof(struct bad_item));
5489 INIT_LIST_HEAD(&bad->list);
5490 memcpy(&bad->key, &key,
5491 sizeof(struct btrfs_key));
5492 bad->root_id = owner;
5493 list_add_tail(&bad->list, &delete_items);
5496 if (key.type != BTRFS_EXTENT_DATA_KEY)
5498 fi = btrfs_item_ptr(buf, i,
5499 struct btrfs_file_extent_item);
5500 if (btrfs_file_extent_type(buf, fi) ==
5501 BTRFS_FILE_EXTENT_INLINE)
5503 if (btrfs_file_extent_disk_bytenr(buf, fi) == 0)
5506 data_bytes_allocated +=
5507 btrfs_file_extent_disk_num_bytes(buf, fi);
5508 if (data_bytes_allocated < root->sectorsize) {
5511 data_bytes_referenced +=
5512 btrfs_file_extent_num_bytes(buf, fi);
5513 add_data_backref(extent_cache,
5514 btrfs_file_extent_disk_bytenr(buf, fi),
5515 parent, owner, key.objectid, key.offset -
5516 btrfs_file_extent_offset(buf, fi), 1, 1,
5517 btrfs_file_extent_disk_num_bytes(buf, fi));
5521 struct btrfs_key first_key;
5523 first_key.objectid = 0;
5526 btrfs_item_key_to_cpu(buf, &first_key, 0);
5527 level = btrfs_header_level(buf);
5528 for (i = 0; i < nritems; i++) {
5529 ptr = btrfs_node_blockptr(buf, i);
5530 size = btrfs_level_size(root, level - 1);
5531 btrfs_node_key_to_cpu(buf, &key, i);
5533 if ((level == ri->drop_level)
5534 && is_dropped_key(&key, &ri->drop_key)) {
5538 ret = add_extent_rec(extent_cache, &key,
5539 btrfs_node_ptr_generation(buf, i),
5540 ptr, size, 0, 0, 1, 0, 1, 0,
5544 add_tree_backref(extent_cache, ptr, parent, owner, 1);
5547 add_pending(nodes, seen, ptr, size);
5549 add_pending(pending, seen, ptr, size);
5552 btree_space_waste += (BTRFS_NODEPTRS_PER_BLOCK(root) -
5553 nritems) * sizeof(struct btrfs_key_ptr);
5555 total_btree_bytes += buf->len;
5556 if (fs_root_objectid(btrfs_header_owner(buf)))
5557 total_fs_tree_bytes += buf->len;
5558 if (btrfs_header_owner(buf) == BTRFS_EXTENT_TREE_OBJECTID)
5559 total_extent_tree_bytes += buf->len;
5560 if (!found_old_backref &&
5561 btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID &&
5562 btrfs_header_backref_rev(buf) == BTRFS_MIXED_BACKREF_REV &&
5563 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
5564 found_old_backref = 1;
5566 free_extent_buffer(buf);
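/*
 * Queue a tree root for scanning: add its block to the pending/nodes
 * caches, record its extent, and add an initial tree backref (a full
 * backref for reloc or pre-mixed-backref trees, a normal one otherwise).
 */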
5570 static int add_root_to_pending(struct extent_buffer *buf,
5571 struct cache_tree *extent_cache,
5572 struct cache_tree *pending,
5573 struct cache_tree *seen,
5574 struct cache_tree *nodes,
5577 if (btrfs_header_level(buf) > 0)
5578 add_pending(nodes, seen, buf->start, buf->len);
5580 add_pending(pending, seen, buf->start, buf->len);
5581 add_extent_rec(extent_cache, NULL, 0, buf->start, buf->len,
5582 0, 1, 1, 0, 1, 0, buf->len);
5584 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
5585 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
5586 add_tree_backref(extent_cache, buf->start, buf->start,
5589 add_tree_backref(extent_cache, buf->start, 0, objectid, 1);
5593 /* as we fix the tree, we might be deleting blocks that
5594 * we're tracking for repair. This hook makes sure we
5595 * remove any backrefs for blocks as we are fixing them.
5597 static int free_extent_hook(struct btrfs_trans_handle *trans,
5598 struct btrfs_root *root,
5599 u64 bytenr, u64 num_bytes, u64 parent,
5600 u64 root_objectid, u64 owner, u64 offset,
5603 struct extent_record *rec;
5604 struct cache_extent *cache;
5606 struct cache_tree *extent_cache = root->fs_info->fsck_extent_cache;
5608 is_data = owner >= BTRFS_FIRST_FREE_OBJECTID;
5609 cache = lookup_cache_extent(extent_cache, bytenr, num_bytes);
5613 rec = container_of(cache, struct extent_record, cache);
5615 struct data_backref *back;
5616 back = find_data_backref(rec, parent, root_objectid, owner,
5617 offset, 1, bytenr, num_bytes);
5620 if (back->node.found_ref) {
5621 back->found_ref -= refs_to_drop;
5623 rec->refs -= refs_to_drop;
5625 if (back->node.found_extent_tree) {
5626 back->num_refs -= refs_to_drop;
5627 if (rec->extent_item_refs)
5628 rec->extent_item_refs -= refs_to_drop;
5630 if (back->found_ref == 0)
5631 back->node.found_ref = 0;
5632 if (back->num_refs == 0)
5633 back->node.found_extent_tree = 0;
5635 if (!back->node.found_extent_tree && back->node.found_ref) {
5636 list_del(&back->node.list);
5640 struct tree_backref *back;
5641 back = find_tree_backref(rec, parent, root_objectid);
5644 if (back->node.found_ref) {
5647 back->node.found_ref = 0;
5649 if (back->node.found_extent_tree) {
5650 if (rec->extent_item_refs)
5651 rec->extent_item_refs--;
5652 back->node.found_extent_tree = 0;
5654 if (!back->node.found_extent_tree && back->node.found_ref) {
5655 list_del(&back->node.list);
5659 maybe_free_extent_rec(extent_cache, rec);
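/*
 * Delete every extent tree item starting at 'bytenr' (extent/metadata
 * items and any backref items), updating block group accounting for the
 * extent and metadata items that get removed.
 */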
5664 static int delete_extent_records(struct btrfs_trans_handle *trans,
5665 struct btrfs_root *root,
5666 struct btrfs_path *path,
5667 u64 bytenr, u64 new_len)
5669 struct btrfs_key key;
5670 struct btrfs_key found_key;
5671 struct extent_buffer *leaf;
5676 key.objectid = bytenr;
5678 key.offset = (u64)-1;
5681 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
5688 if (path->slots[0] == 0)
5694 leaf = path->nodes[0];
5695 slot = path->slots[0];
5697 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5698 if (found_key.objectid != bytenr)
5701 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
5702 found_key.type != BTRFS_METADATA_ITEM_KEY &&
5703 found_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
5704 found_key.type != BTRFS_EXTENT_DATA_REF_KEY &&
5705 found_key.type != BTRFS_EXTENT_REF_V0_KEY &&
5706 found_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
5707 found_key.type != BTRFS_SHARED_DATA_REF_KEY) {
5708 btrfs_release_path(path);
5709 if (found_key.type == 0) {
5710 if (found_key.offset == 0)
5712 key.offset = found_key.offset - 1;
5713 key.type = found_key.type;
5715 key.type = found_key.type - 1;
5716 key.offset = (u64)-1;
5720 fprintf(stderr, "repair deleting extent record: key %Lu %u %Lu\n",
5721 found_key.objectid, found_key.type, found_key.offset);
5723 ret = btrfs_del_item(trans, root->fs_info->extent_root, path);
5726 btrfs_release_path(path);
5728 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5729 found_key.type == BTRFS_METADATA_ITEM_KEY) {
5730 u64 bytes = (found_key.type == BTRFS_EXTENT_ITEM_KEY) ?
5731 found_key.offset : root->leafsize;
5733 ret = btrfs_update_block_group(trans, root, bytenr,
5740 btrfs_release_path(path);
5745 * for a single backref, this will allocate a new extent
5746 * and add the backref to it.
5748 static int record_extent(struct btrfs_trans_handle *trans,
5749 struct btrfs_fs_info *info,
5750 struct btrfs_path *path,
5751 struct extent_record *rec,
5752 struct extent_backref *back,
5753 int allocated, u64 flags)
5756 struct btrfs_root *extent_root = info->extent_root;
5757 struct extent_buffer *leaf;
5758 struct btrfs_key ins_key;
5759 struct btrfs_extent_item *ei;
5760 struct tree_backref *tback;
5761 struct data_backref *dback;
5762 struct btrfs_tree_block_info *bi;
5765 rec->max_size = max_t(u64, rec->max_size,
5766 info->extent_root->leafsize);
5769 u32 item_size = sizeof(*ei);
5772 item_size += sizeof(*bi);
5774 ins_key.objectid = rec->start;
5775 ins_key.offset = rec->max_size;
5776 ins_key.type = BTRFS_EXTENT_ITEM_KEY;
5778 ret = btrfs_insert_empty_item(trans, extent_root, path,
5779 &ins_key, item_size);
5783 leaf = path->nodes[0];
5784 ei = btrfs_item_ptr(leaf, path->slots[0],
5785 struct btrfs_extent_item);
5787 btrfs_set_extent_refs(leaf, ei, 0);
5788 btrfs_set_extent_generation(leaf, ei, rec->generation);
5790 if (back->is_data) {
5791 btrfs_set_extent_flags(leaf, ei,
5792 BTRFS_EXTENT_FLAG_DATA);
5794 struct btrfs_disk_key copy_key;
5796 tback = (struct tree_backref *)back;
5797 bi = (struct btrfs_tree_block_info *)(ei + 1);
5798 memset_extent_buffer(leaf, 0, (unsigned long)bi,
5801 btrfs_set_disk_key_objectid(&copy_key,
5802 rec->info_objectid);
5803 btrfs_set_disk_key_type(&copy_key, 0);
5804 btrfs_set_disk_key_offset(&copy_key, 0);
5806 btrfs_set_tree_block_level(leaf, bi, rec->info_level);
5807 btrfs_set_tree_block_key(leaf, bi, &copy_key);
5809 btrfs_set_extent_flags(leaf, ei,
5810 BTRFS_EXTENT_FLAG_TREE_BLOCK | flags);
5813 btrfs_mark_buffer_dirty(leaf);
5814 ret = btrfs_update_block_group(trans, extent_root, rec->start,
5815 rec->max_size, 1, 0);
5818 btrfs_release_path(path);
5821 if (back->is_data) {
5825 dback = (struct data_backref *)back;
5826 if (back->full_backref)
5827 parent = dback->parent;
5831 for (i = 0; i < dback->found_ref; i++) {
5832 /* if parent != 0, we're doing a full backref
5833 * passing BTRFS_FIRST_FREE_OBJECTID as the owner
5834 * just makes the backref allocator create a data
5837 ret = btrfs_inc_extent_ref(trans, info->extent_root,
5838 rec->start, rec->max_size,
5842 BTRFS_FIRST_FREE_OBJECTID :
5848 fprintf(stderr, "adding new data backref"
5849 " on %llu %s %llu owner %llu"
5850 " offset %llu found %d\n",
5851 (unsigned long long)rec->start,
5852 back->full_backref ?
5854 back->full_backref ?
5855 (unsigned long long)parent :
5856 (unsigned long long)dback->root,
5857 (unsigned long long)dback->owner,
5858 (unsigned long long)dback->offset,
5863 tback = (struct tree_backref *)back;
5864 if (back->full_backref)
5865 parent = tback->parent;
5869 ret = btrfs_inc_extent_ref(trans, info->extent_root,
5870 rec->start, rec->max_size,
5871 parent, tback->root, 0, 0);
5872 fprintf(stderr, "adding new tree backref on "
5873 "start %llu len %llu parent %llu root %llu\n",
5874 rec->start, rec->max_size, tback->parent, tback->root);
5879 btrfs_release_path(path);
5883 struct extent_entry {
5888 struct list_head list;
5891 static struct extent_entry *find_entry(struct list_head *entries,
5892 u64 bytenr, u64 bytes)
5894 struct extent_entry *entry = NULL;
5896 list_for_each_entry(entry, entries, list) {
5897 if (entry->bytenr == bytenr && entry->bytes == bytes)
5904 static struct extent_entry *find_most_right_entry(struct list_head *entries)
5906 struct extent_entry *entry, *best = NULL, *prev = NULL;
5908 list_for_each_entry(entry, entries, list) {
5915 * If there are as many broken entries as entries then we know
5916 * not to trust this particular entry.
5918 if (entry->broken == entry->count)
5922 * If our current entry == best then we can't be sure our best
5923 * is really the best, so we need to keep searching.
5925 if (best && best->count == entry->count) {
5931 /* Prev == entry, not good enough, have to keep searching */
5932 if (!prev->broken && prev->count == entry->count)
5936 best = (prev->count > entry->count) ? prev : entry;
5937 else if (best->count < entry->count)
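/*
 * Rewrite the file extent item described by 'dback' so that its
 * disk_bytenr/disk_num_bytes (and offset) agree with the winning 'entry',
 * cowing down to the leaf in the owning subvolume root first.
 */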
5945 static int repair_ref(struct btrfs_trans_handle *trans,
5946 struct btrfs_fs_info *info, struct btrfs_path *path,
5947 struct data_backref *dback, struct extent_entry *entry)
5949 struct btrfs_root *root;
5950 struct btrfs_file_extent_item *fi;
5951 struct extent_buffer *leaf;
5952 struct btrfs_key key;
5956 key.objectid = dback->root;
5957 key.type = BTRFS_ROOT_ITEM_KEY;
5958 key.offset = (u64)-1;
5959 root = btrfs_read_fs_root(info, &key);
5961 fprintf(stderr, "Couldn't find root for our ref\n");
5966 * The backref points to the original offset of the extent if it was
5967 * split, so we need to search down to the offset we have and then walk
5968 * forward until we find the backref we're looking for.
5970 key.objectid = dback->owner;
5971 key.type = BTRFS_EXTENT_DATA_KEY;
5972 key.offset = dback->offset;
5973 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5975 fprintf(stderr, "Error looking up ref %d\n", ret);
5980 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5981 ret = btrfs_next_leaf(root, path);
5983 fprintf(stderr, "Couldn't find our ref, next\n");
5987 leaf = path->nodes[0];
5988 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5989 if (key.objectid != dback->owner ||
5990 key.type != BTRFS_EXTENT_DATA_KEY) {
5991 fprintf(stderr, "Couldn't find our ref, search\n");
5994 fi = btrfs_item_ptr(leaf, path->slots[0],
5995 struct btrfs_file_extent_item);
5996 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5997 bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
5999 if (bytenr == dback->disk_bytenr && bytes == dback->bytes)
6004 btrfs_release_path(path);
6007 * Have to make sure that this root gets updated when we commit the
6010 record_root_in_trans(trans, root);
6013 * Ok we have the key of the file extent we want to fix, now we can cow
6014 * down to the thing and fix it.
6016 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6018 fprintf(stderr, "Error cowing down to ref [%Lu, %u, %Lu]: %d\n",
6019 key.objectid, key.type, key.offset, ret);
6023 fprintf(stderr, "Well that's odd, we just found this key "
6024 "[%Lu, %u, %Lu]\n", key.objectid, key.type,
6028 leaf = path->nodes[0];
6029 fi = btrfs_item_ptr(leaf, path->slots[0],
6030 struct btrfs_file_extent_item);
6032 if (btrfs_file_extent_compression(leaf, fi) &&
6033 dback->disk_bytenr != entry->bytenr) {
6034 fprintf(stderr, "Ref doesn't match the record start and is "
6035 "compressed, please take a btrfs-image of this file "
6036 "system and send it to a btrfs developer so they can "
6037 "complete this functionality for bytenr %Lu\n",
6038 dback->disk_bytenr);
6042 if (dback->node.broken && dback->disk_bytenr != entry->bytenr) {
6043 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6044 } else if (dback->disk_bytenr > entry->bytenr) {
6045 u64 off_diff, offset;
6047 off_diff = dback->disk_bytenr - entry->bytenr;
6048 offset = btrfs_file_extent_offset(leaf, fi);
6049 if (dback->disk_bytenr + offset +
6050 btrfs_file_extent_num_bytes(leaf, fi) >
6051 entry->bytenr + entry->bytes) {
6052 fprintf(stderr, "Ref is past the entry end, please "
6053 "take a btrfs-image of this file system and "
6054 "send it to a btrfs developer, ref %Lu\n",
6055 dback->disk_bytenr);
6059 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6060 btrfs_set_file_extent_offset(leaf, fi, offset);
6061 } else if (dback->disk_bytenr < entry->bytenr) {
6064 offset = btrfs_file_extent_offset(leaf, fi);
6065 if (dback->disk_bytenr + offset < entry->bytenr) {
6066 fprintf(stderr, "Ref is before the entry start, please"
6067 " take a btrfs-image of this file system and "
6068 "send it to a btrfs developer, ref %Lu\n",
6069 dback->disk_bytenr);
6073 offset += dback->disk_bytenr;
6074 offset -= entry->bytenr;
6075 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6076 btrfs_set_file_extent_offset(leaf, fi, offset);
6079 btrfs_set_file_extent_disk_num_bytes(leaf, fi, entry->bytes);
6082 * Chances are if disk_num_bytes were wrong then so is ram_bytes, but
6083 * only do this if we aren't using compression, otherwise it's a
6086 if (!btrfs_file_extent_compression(leaf, fi))
6087 btrfs_set_file_extent_ram_bytes(leaf, fi, entry->bytes);
6089 printf("ram bytes may be wrong?\n");
6090 btrfs_mark_buffer_dirty(leaf);
6091 btrfs_release_path(path);
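/*
 * Collect the (disk_bytenr, bytes) values claimed by the data backrefs of
 * 'rec', pick the value most of them (or the extent record) agree on and
 * repair the refs that disagree.  If anything was changed, the caller has
 * to throw away its cache and rescan.
 */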
6095 static int verify_backrefs(struct btrfs_trans_handle *trans,
6096 struct btrfs_fs_info *info, struct btrfs_path *path,
6097 struct extent_record *rec)
6099 struct extent_backref *back;
6100 struct data_backref *dback;
6101 struct extent_entry *entry, *best = NULL;
6104 int broken_entries = 0;
6109 * Metadata is easy and the backrefs should always agree on bytenr and
6110 * size, if not we've got bigger issues.
6115 list_for_each_entry(back, &rec->backrefs, list) {
6116 if (back->full_backref || !back->is_data)
6119 dback = (struct data_backref *)back;
6122 * We only pay attention to backrefs that we found a real
6125 if (dback->found_ref == 0)
6129 * For now we only catch when the bytes don't match, not the
6130 * bytenr. We can easily do this at the same time, but I want
6131 * to have a fs image to test on before we just add repair
6132 * functionality willy-nilly so we know we won't screw up the
6136 entry = find_entry(&entries, dback->disk_bytenr,
6139 entry = malloc(sizeof(struct extent_entry));
6144 memset(entry, 0, sizeof(*entry));
6145 entry->bytenr = dback->disk_bytenr;
6146 entry->bytes = dback->bytes;
6147 list_add_tail(&entry->list, &entries);
6152 * If we only have one entry we may think the entries agree when
6153 * in reality they don't so we have to do some extra checking.
6155 if (dback->disk_bytenr != rec->start ||
6156 dback->bytes != rec->nr || back->broken)
6167 /* Yay all the backrefs agree, carry on good sir */
6168 if (nr_entries <= 1 && !mismatch)
6171 fprintf(stderr, "attempting to repair backref discrepancy for bytenr "
6172 "%Lu\n", rec->start);
6175 * First we want to see if the backrefs can agree amongst themselves who
6176 * is right, so figure out which one of the entries has the highest
6179 best = find_most_right_entry(&entries);
6182 * Ok so we may have an even split between what the backrefs think, so
6183 * this is where we use the extent ref to see what it thinks.
6186 entry = find_entry(&entries, rec->start, rec->nr);
6187 if (!entry && (!broken_entries || !rec->found_rec)) {
6188 fprintf(stderr, "Backrefs don't agree with each other "
6189 "and extent record doesn't agree with anybody,"
6190 " so we can't fix bytenr %Lu bytes %Lu\n",
6191 rec->start, rec->nr);
6194 } else if (!entry) {
6196 * Ok our backrefs were broken, we'll assume this is the
6197 * correct value and add an entry for this range.
6199 entry = malloc(sizeof(struct extent_entry));
6204 memset(entry, 0, sizeof(*entry));
6205 entry->bytenr = rec->start;
6206 entry->bytes = rec->nr;
6207 list_add_tail(&entry->list, &entries);
6211 best = find_most_right_entry(&entries);
6213 fprintf(stderr, "Backrefs and extent record evenly "
6214 "split on who is right, this is going to "
6215 "require user input to fix bytenr %Lu bytes "
6216 "%Lu\n", rec->start, rec->nr);
6223 * I don't think this can happen currently as we'll abort() if we catch
6224 * this case higher up, but in case somebody removes that we still can't
6225 * deal with it properly here yet, so just bail out if that's the case.
6227 if (best->bytenr != rec->start) {
6228 fprintf(stderr, "Extent start and backref starts don't match, "
6229 "please use btrfs-image on this file system and send "
6230 "it to a btrfs developer so they can make fsck fix "
6231 "this particular case. bytenr is %Lu, bytes is %Lu\n",
6232 rec->start, rec->nr);
6238 * Ok great we all agreed on an extent record, let's go find the real
6239 * references and fix up the ones that don't match.
6241 list_for_each_entry(back, &rec->backrefs, list) {
6242 if (back->full_backref || !back->is_data)
6245 dback = (struct data_backref *)back;
6248 * Still ignoring backrefs that don't have a real ref attached
6251 if (dback->found_ref == 0)
6254 if (dback->bytes == best->bytes &&
6255 dback->disk_bytenr == best->bytenr)
6258 ret = repair_ref(trans, info, path, dback, best);
6264 * Ok we messed with the actual refs, which means we need to drop our
6265 * entire cache and go back and rescan. I know this is a huge pain and
6266 * adds a lot of extra work, but it's the only way to be safe. Once all
6267 * the backrefs agree we may not need to do anything to the extent
6272 while (!list_empty(&entries)) {
6273 entry = list_entry(entries.next, struct extent_entry, list);
6274 list_del_init(&entry->list);
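/*
 * For an extent record with duplicates, promote the duplicate that came
 * from a real EXTENT_ITEM to be the primary record and fold any other
 * overlapping records into it.  Returns 0 when duplicates still need to
 * be deleted, 1 when we are done with this record.
 */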
6280 static int process_duplicates(struct btrfs_root *root,
6281 struct cache_tree *extent_cache,
6282 struct extent_record *rec)
6284 struct extent_record *good, *tmp;
6285 struct cache_extent *cache;
6289 * If we found an extent record for this extent then return, or if we
6290 * have more than one duplicate we are likely going to need to delete
6293 if (rec->found_rec || rec->num_duplicates > 1)
6296 /* Shouldn't happen but just in case */
6297 BUG_ON(!rec->num_duplicates);
6300 * So this happens if we end up with a backref that doesn't match the
6301 * actual extent entry. So either the backref is bad or the extent
6302 * entry is bad. Either way we want to have the extent_record actually
6303 * reflect what we found in the extent_tree, so we need to take the
6304 * duplicate out and use that as the extent_record since the only way we
6305 * get a duplicate is if we find a real life BTRFS_EXTENT_ITEM_KEY.
6307 remove_cache_extent(extent_cache, &rec->cache);
6309 good = list_entry(rec->dups.next, struct extent_record, list);
6310 list_del_init(&good->list);
6311 INIT_LIST_HEAD(&good->backrefs);
6312 INIT_LIST_HEAD(&good->dups);
6313 good->cache.start = good->start;
6314 good->cache.size = good->nr;
6315 good->content_checked = 0;
6316 good->owner_ref_checked = 0;
6317 good->num_duplicates = 0;
6318 good->refs = rec->refs;
6319 list_splice_init(&rec->backrefs, &good->backrefs);
6321 cache = lookup_cache_extent(extent_cache, good->start,
6325 tmp = container_of(cache, struct extent_record, cache);
6328 * If we find another overlapping extent and its found_rec is
6329 * set then it's a duplicate and we need to try and delete
6332 if (tmp->found_rec || tmp->num_duplicates > 0) {
6333 if (list_empty(&good->list))
6334 list_add_tail(&good->list,
6335 &duplicate_extents);
6336 good->num_duplicates += tmp->num_duplicates + 1;
6337 list_splice_init(&tmp->dups, &good->dups);
6338 list_del_init(&tmp->list);
6339 list_add_tail(&tmp->list, &good->dups);
6340 remove_cache_extent(extent_cache, &tmp->cache);
6345 * Ok we have another extent rec that isn't backed by an extent
6346 * item, so let's just add it to this extent and carry on as above.
6348 good->refs += tmp->refs;
6349 list_splice_init(&tmp->backrefs, &good->backrefs);
6350 remove_cache_extent(extent_cache, &tmp->cache);
6353 ret = insert_cache_extent(extent_cache, &good->cache);
6356 return good->num_duplicates ? 0 : 1;
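/*
 * Pick the duplicate record that covers all of the others, delete the
 * superseded EXTENT_ITEMs from the extent tree and free their records.
 * Returns the number of records deleted, or a negative error.
 */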
6359 static int delete_duplicate_records(struct btrfs_trans_handle *trans,
6360 struct btrfs_root *root,
6361 struct extent_record *rec)
6363 LIST_HEAD(delete_list);
6364 struct btrfs_path *path;
6365 struct extent_record *tmp, *good, *n;
6368 struct btrfs_key key;
6370 path = btrfs_alloc_path();
6377 /* Find the record that covers all of the duplicates. */
6378 list_for_each_entry(tmp, &rec->dups, list) {
6379 if (good->start < tmp->start)
6381 if (good->nr > tmp->nr)
6384 if (tmp->start + tmp->nr < good->start + good->nr) {
6385 fprintf(stderr, "Ok we have overlapping extents that "
6386 "aren't completely covered by eachother, this "
6387 "is going to require more careful thought. "
6388 "The extents are [%Lu-%Lu] and [%Lu-%Lu]\n",
6389 tmp->start, tmp->nr, good->start, good->nr);
6396 list_add_tail(&rec->list, &delete_list);
6398 list_for_each_entry_safe(tmp, n, &rec->dups, list) {
6401 list_move_tail(&tmp->list, &delete_list);
6404 root = root->fs_info->extent_root;
6405 list_for_each_entry(tmp, &delete_list, list) {
6406 if (tmp->found_rec == 0)
6408 key.objectid = tmp->start;
6409 key.type = BTRFS_EXTENT_ITEM_KEY;
6410 key.offset = tmp->nr;
6412 /* Shouldn't happen but just in case */
6413 if (tmp->metadata) {
6414 fprintf(stderr, "Well this shouldn't happen, extent "
6415 "record overlaps but is metadata? "
6416 "[%Lu, %Lu]\n", tmp->start, tmp->nr);
6420 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
6426 ret = btrfs_del_item(trans, root, path);
6429 btrfs_release_path(path);
6434 while (!list_empty(&delete_list)) {
6435 tmp = list_entry(delete_list.next, struct extent_record, list);
6436 list_del_init(&tmp->list);
6442 while (!list_empty(&rec->dups)) {
6443 tmp = list_entry(rec->dups.next, struct extent_record, list);
6444 list_del_init(&tmp->list);
6448 btrfs_free_path(path);
6450 if (!ret && !nr_del)
6451 rec->num_duplicates = 0;
6453 return ret ? ret : nr_del;
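/*
 * For every data backref of 'rec' that never matched a real file extent,
 * look up the extent item the backref claims to describe and remember the
 * bytenr/bytes actually found there, flagging them as untrusted so
 * verify_backrefs() knows these values came from a broken ref.
 */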
6456 static int find_possible_backrefs(struct btrfs_trans_handle *trans,
6457 struct btrfs_fs_info *info,
6458 struct btrfs_path *path,
6459 struct cache_tree *extent_cache,
6460 struct extent_record *rec)
6462 struct btrfs_root *root;
6463 struct extent_backref *back;
6464 struct data_backref *dback;
6465 struct cache_extent *cache;
6466 struct btrfs_file_extent_item *fi;
6467 struct btrfs_key key;
6471 list_for_each_entry(back, &rec->backrefs, list) {
6472 /* Don't care about full backrefs (poor unloved backrefs) */
6473 if (back->full_backref || !back->is_data)
6476 dback = (struct data_backref *)back;
6478 /* We found this one, we don't need to do a lookup */
6479 if (dback->found_ref)
6482 key.objectid = dback->root;
6483 key.type = BTRFS_ROOT_ITEM_KEY;
6484 key.offset = (u64)-1;
6486 root = btrfs_read_fs_root(info, &key);
6488 /* No root, definitely a bad ref, skip */
6489 if (IS_ERR(root) && PTR_ERR(root) == -ENOENT)
6491 /* Other err, exit */
6493 return PTR_ERR(root);
6495 key.objectid = dback->owner;
6496 key.type = BTRFS_EXTENT_DATA_KEY;
6497 key.offset = dback->offset;
6498 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6500 btrfs_release_path(path);
6503 /* Didn't find it, we can carry on */
6508 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6509 struct btrfs_file_extent_item);
6510 bytenr = btrfs_file_extent_disk_bytenr(path->nodes[0], fi);
6511 bytes = btrfs_file_extent_disk_num_bytes(path->nodes[0], fi);
6512 btrfs_release_path(path);
6513 cache = lookup_cache_extent(extent_cache, bytenr, 1);
6515 struct extent_record *tmp;
6516 tmp = container_of(cache, struct extent_record, cache);
6519 * If we found an extent record for the bytenr for this
6520 * particular backref then we can't add it to our
6521 * current extent record. We only want to add backrefs
6522 * that don't have a corresponding extent item in the
6523 * extent tree since they likely belong to this record
6524 * and we need to fix it if it doesn't match bytenrs.
6530 dback->found_ref += 1;
6531 dback->disk_bytenr = bytenr;
6532 dback->bytes = bytes;
6535 * Set this so the verify backref code knows not to trust the
6536 * values in this backref.
6545 * when an incorrect extent item is found, this will delete
6546 * all of the existing entries for it and recreate them
6547 * based on what the tree scan found.
6549 static int fixup_extent_refs(struct btrfs_trans_handle *trans,
6550 struct btrfs_fs_info *info,
6551 struct cache_tree *extent_cache,
6552 struct extent_record *rec)
6555 struct btrfs_path *path;
6556 struct list_head *cur = rec->backrefs.next;
6557 struct cache_extent *cache;
6558 struct extent_backref *back;
6563 * remember our flags for recreating the extent.
6564 * FIXME: if we have cleared the extent tree, we cannot
6565 * look up extent info in the extent tree.
6567 if (!init_extent_tree) {
6568 ret = btrfs_lookup_extent_info(NULL, info->extent_root,
6569 rec->start, rec->max_size,
6570 rec->metadata, NULL, &flags);
6574 if (rec->flag_block_full_backref)
6575 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6578 path = btrfs_alloc_path();
6582 if (rec->refs != rec->extent_item_refs && !rec->metadata) {
6584 * Sometimes the backrefs themselves are so broken they don't
6585 * get attached to any meaningful rec, so first go back and
6586 * check any of our backrefs that we couldn't find and throw
6587 * them into the list if we find the backref so that
6588 * verify_backrefs can figure out what to do.
6590 ret = find_possible_backrefs(trans, info, path, extent_cache,
6596 /* step one, make sure all of the backrefs agree */
6597 ret = verify_backrefs(trans, info, path, rec);
6601 /* step two, delete all the existing records */
6602 ret = delete_extent_records(trans, info->extent_root, path,
6603 rec->start, rec->max_size);
6608 /* was this block corrupt? If so, don't add references to it */
6609 cache = lookup_cache_extent(info->corrupt_blocks,
6610 rec->start, rec->max_size);
6616 /* step three, recreate all the refs we did find */
6617 while(cur != &rec->backrefs) {
6618 back = list_entry(cur, struct extent_backref, list);
6622 * if we didn't find any references, don't create a
6625 if (!back->found_ref)
6628 ret = record_extent(trans, info, path, rec, back, allocated, flags);
6635 btrfs_free_path(path);
6639 /* right now we only prune from the extent allocation tree */
6640 static int prune_one_block(struct btrfs_trans_handle *trans,
6641 struct btrfs_fs_info *info,
6642 struct btrfs_corrupt_block *corrupt)
6645 struct btrfs_path path;
6646 struct extent_buffer *eb;
6650 int level = corrupt->level + 1;
6652 btrfs_init_path(&path);
6654 /* we want to stop at the parent of our busted block */
6655 path.lowest_level = level;
6657 ret = btrfs_search_slot(trans, info->extent_root,
6658 &corrupt->key, &path, -1, 1);
6663 eb = path.nodes[level];
6670 * hopefully the search gave us the block we want to prune,
6671 * let's try that first
6673 slot = path.slots[level];
6674 found = btrfs_node_blockptr(eb, slot);
6675 if (found == corrupt->cache.start)
6678 nritems = btrfs_header_nritems(eb);
6680 /* the search failed, let's scan this node and hope we find it */
6681 for (slot = 0; slot < nritems; slot++) {
6682 found = btrfs_node_blockptr(eb, slot);
6683 if (found == corrupt->cache.start)
6687 * we couldn't find the bad block. TODO, search all the nodes for pointers
6690 if (eb == info->extent_root->node) {
6695 btrfs_release_path(&path);
6700 printk("deleting pointer to block %Lu\n", corrupt->cache.start);
6701 ret = btrfs_del_ptr(trans, info->extent_root, &path, level, slot);
6704 btrfs_release_path(&path);
6708 static int prune_corrupt_blocks(struct btrfs_trans_handle *trans,
6709 struct btrfs_fs_info *info)
6711 struct cache_extent *cache;
6712 struct btrfs_corrupt_block *corrupt;
6714 cache = search_cache_extent(info->corrupt_blocks, 0);
6718 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
6719 prune_one_block(trans, info, corrupt);
6720 cache = next_cache_extent(cache);
6725 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info)
6727 struct btrfs_block_group_cache *cache;
6732 ret = find_first_extent_bit(&fs_info->free_space_cache, 0,
6733 &start, &end, EXTENT_DIRTY);
6736 clear_extent_dirty(&fs_info->free_space_cache, start, end,
6742 cache = btrfs_lookup_first_block_group(fs_info, start);
6747 start = cache->key.objectid + cache->key.offset;
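/*
 * Cross-check every extent record against the extent tree: duplicate
 * items, ref count mismatches, missing backpointers and unchecked owner
 * refs are reported.  In repair mode every cached extent (and corrupt
 * block) is pinned up front so repairs don't allocate from them, and
 * broken records are rebuilt with fixup_extent_refs().
 */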
6751 static int check_extent_refs(struct btrfs_trans_handle *trans,
6752 struct btrfs_root *root,
6753 struct cache_tree *extent_cache)
6755 struct extent_record *rec;
6756 struct cache_extent *cache;
6764 * if we're doing a repair, we have to make sure
6765 * we don't allocate from the problem extents.
6766 * In the worst case, this will be all the
6769 cache = search_cache_extent(extent_cache, 0);
6771 rec = container_of(cache, struct extent_record, cache);
6772 btrfs_pin_extent(root->fs_info,
6773 rec->start, rec->max_size);
6774 cache = next_cache_extent(cache);
6777 /* pin down all the corrupted blocks too */
6778 cache = search_cache_extent(root->fs_info->corrupt_blocks, 0);
6780 btrfs_pin_extent(root->fs_info,
6781 cache->start, cache->size);
6782 cache = next_cache_extent(cache);
6784 prune_corrupt_blocks(trans, root->fs_info);
6785 reset_cached_block_groups(root->fs_info);
6789 * We need to delete any duplicate entries we find first otherwise we
6790 * could mess up the extent tree when we have backrefs that actually
6791 * belong to a different extent item and not the weird duplicate one.
6793 while (repair && !list_empty(&duplicate_extents)) {
6794 rec = list_entry(duplicate_extents.next, struct extent_record,
6796 list_del_init(&rec->list);
6798 /* Sometimes we can find a backref before we find an actual
6799 * extent, so we need to process it a little bit to see if there
6800 * truly are multiple EXTENT_ITEM_KEY's for the same range, or
6801 * if this is a backref screwup. If we need to delete stuff
6802 * process_duplicates() will return 0, otherwise it will return
6805 if (process_duplicates(root, extent_cache, rec))
6807 ret = delete_duplicate_records(trans, root, rec);
6811 * delete_duplicate_records will return the number of entries
6812 * deleted, so if it's greater than 0 then we know we actually
6813 * did something and we need to remove.
6824 cache = search_cache_extent(extent_cache, 0);
6827 rec = container_of(cache, struct extent_record, cache);
6828 if (rec->num_duplicates) {
6829 fprintf(stderr, "extent item %llu has multiple extent "
6830 "items\n", (unsigned long long)rec->start);
6834 if (rec->refs != rec->extent_item_refs) {
6835 fprintf(stderr, "ref mismatch on [%llu %llu] ",
6836 (unsigned long long)rec->start,
6837 (unsigned long long)rec->nr);
6838 fprintf(stderr, "extent item %llu, found %llu\n",
6839 (unsigned long long)rec->extent_item_refs,
6840 (unsigned long long)rec->refs);
6841 if (!fixed && repair) {
6842 ret = fixup_extent_refs(trans, root->fs_info,
6851 if (all_backpointers_checked(rec, 1)) {
6852 fprintf(stderr, "backpointer mismatch on [%llu %llu]\n",
6853 (unsigned long long)rec->start,
6854 (unsigned long long)rec->nr);
6856 if (!fixed && repair) {
6857 ret = fixup_extent_refs(trans, root->fs_info,
6866 if (!rec->owner_ref_checked) {
6867 fprintf(stderr, "owner ref check failed [%llu %llu]\n",
6868 (unsigned long long)rec->start,
6869 (unsigned long long)rec->nr);
6870 if (!fixed && repair) {
6871 ret = fixup_extent_refs(trans, root->fs_info,
6880 remove_cache_extent(extent_cache, cache);
6881 free_all_extent_backrefs(rec);
6886 if (ret && ret != -EAGAIN) {
6887 fprintf(stderr, "failed to repair damaged filesystem, aborting\n");
6890 btrfs_fix_block_accounting(trans, root);
6893 fprintf(stderr, "repaired damaged extent references\n");
6899 u64 calc_stripe_length(u64 type, u64 length, int num_stripes)
6903 if (type & BTRFS_BLOCK_GROUP_RAID0) {
6904 stripe_size = length;
6905 stripe_size /= num_stripes;
6906 } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
6907 stripe_size = length * 2;
6908 stripe_size /= num_stripes;
6909 } else if (type & BTRFS_BLOCK_GROUP_RAID5) {
6910 stripe_size = length;
6911 stripe_size /= (num_stripes - 1);
6912 } else if (type & BTRFS_BLOCK_GROUP_RAID6) {
6913 stripe_size = length;
6914 stripe_size /= (num_stripes - 2);
6916 stripe_size = length;
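/*
 * For example (values chosen for illustration): a RAID10 chunk of length
 * 4GiB with 4 stripes stores two copies, so each stripe is 4GiB * 2 / 4 =
 * 2GiB; a RAID5 chunk of length 3GiB with 4 stripes needs 3GiB / (4 - 1) =
 * 1GiB per stripe, the extra stripe holding parity.
 */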
6922 * Check the chunk with its block group/dev list ref:
6923 * Return 0 if all refs seem valid.
6924 * Return 1 if some refs seem valid but others, such as a missing block
6925 * group, need a later pass over the extent tree to rebuild them.
6926 * Return -1 if essential refs are missing and cannot be rebuilt.
6928 static int check_chunk_refs(struct chunk_record *chunk_rec,
6929 struct block_group_tree *block_group_cache,
6930 struct device_extent_tree *dev_extent_cache,
6933 struct cache_extent *block_group_item;
6934 struct block_group_record *block_group_rec;
6935 struct cache_extent *dev_extent_item;
6936 struct device_extent_record *dev_extent_rec;
6943 block_group_item = lookup_cache_extent(&block_group_cache->tree,
6946 if (block_group_item) {
6947 block_group_rec = container_of(block_group_item,
6948 struct block_group_record,
6950 if (chunk_rec->length != block_group_rec->offset ||
6951 chunk_rec->offset != block_group_rec->objectid ||
6952 chunk_rec->type_flags != block_group_rec->flags) {
6955 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) mismatch with block group[%llu, %u, %llu]: offset(%llu), objectid(%llu), flags(%llu)\n",
6956 chunk_rec->objectid,
6961 chunk_rec->type_flags,
6962 block_group_rec->objectid,
6963 block_group_rec->type,
6964 block_group_rec->offset,
6965 block_group_rec->offset,
6966 block_group_rec->objectid,
6967 block_group_rec->flags);
6970 list_del_init(&block_group_rec->list);
6971 chunk_rec->bg_rec = block_group_rec;
6976 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) is not found in block group\n",
6977 chunk_rec->objectid,
6982 chunk_rec->type_flags);
6986 length = calc_stripe_length(chunk_rec->type_flags, chunk_rec->length,
6987 chunk_rec->num_stripes);
6988 for (i = 0; i < chunk_rec->num_stripes; ++i) {
6989 devid = chunk_rec->stripes[i].devid;
6990 offset = chunk_rec->stripes[i].offset;
6991 dev_extent_item = lookup_cache_extent2(&dev_extent_cache->tree,
6992 devid, offset, length);
6993 if (dev_extent_item) {
6994 dev_extent_rec = container_of(dev_extent_item,
6995 struct device_extent_record,
6997 if (dev_extent_rec->objectid != devid ||
6998 dev_extent_rec->offset != offset ||
6999 dev_extent_rec->chunk_offset != chunk_rec->offset ||
7000 dev_extent_rec->length != length) {
7003 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] dismatch dev extent[%llu, %llu, %llu]\n",
7004 chunk_rec->objectid,
7007 chunk_rec->stripes[i].devid,
7008 chunk_rec->stripes[i].offset,
7009 dev_extent_rec->objectid,
7010 dev_extent_rec->offset,
7011 dev_extent_rec->length);
7014 list_move(&dev_extent_rec->chunk_list,
7015 &chunk_rec->dextents);
7020 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] is not found in dev extent\n",
7021 chunk_rec->objectid,
7024 chunk_rec->stripes[i].devid,
7025 chunk_rec->stripes[i].offset);
7032 /* check btrfs_chunk -> btrfs_dev_extent / btrfs_block_group_item */
7033 int check_chunks(struct cache_tree *chunk_cache,
7034 struct block_group_tree *block_group_cache,
7035 struct device_extent_tree *dev_extent_cache,
7036 struct list_head *good, struct list_head *bad,
7037 struct list_head *rebuild, int silent)
7039 struct cache_extent *chunk_item;
7040 struct chunk_record *chunk_rec;
7041 struct block_group_record *bg_rec;
7042 struct device_extent_record *dext_rec;
7046 chunk_item = first_cache_extent(chunk_cache);
7047 while (chunk_item) {
7048 chunk_rec = container_of(chunk_item, struct chunk_record,
7050 err = check_chunk_refs(chunk_rec, block_group_cache,
7051 dev_extent_cache, silent);
7054 if (err == 0 && good)
7055 list_add_tail(&chunk_rec->list, good);
7056 if (err > 0 && rebuild)
7057 list_add_tail(&chunk_rec->list, rebuild);
7059 list_add_tail(&chunk_rec->list, bad);
7060 chunk_item = next_cache_extent(chunk_item);
7063 list_for_each_entry(bg_rec, &block_group_cache->block_groups, list) {
7066 "Block group[%llu, %llu] (flags = %llu) didn't find the relative chunk.\n",
7074 list_for_each_entry(dext_rec, &dev_extent_cache->no_chunk_orphans,
7078 "Device extent[%llu, %llu, %llu] didn't find the relative chunk.\n",
7089 static int check_device_used(struct device_record *dev_rec,
7090 struct device_extent_tree *dext_cache)
7092 struct cache_extent *cache;
7093 struct device_extent_record *dev_extent_rec;
7096 cache = search_cache_extent2(&dext_cache->tree, dev_rec->devid, 0);
7098 dev_extent_rec = container_of(cache,
7099 struct device_extent_record,
7101 if (dev_extent_rec->objectid != dev_rec->devid)
7104 list_del_init(&dev_extent_rec->device_list);
7105 total_byte += dev_extent_rec->length;
7106 cache = next_cache_extent(cache);
7109 if (total_byte != dev_rec->byte_used) {
7111 "Dev extent's total-byte(%llu) is not equal to byte-used(%llu) in dev[%llu, %u, %llu]\n",
7112 total_byte, dev_rec->byte_used, dev_rec->objectid,
7113 dev_rec->type, dev_rec->offset);
7120 /* check btrfs_dev_item -> btrfs_dev_extent */
7121 static int check_devices(struct rb_root *dev_cache,
7122 struct device_extent_tree *dev_extent_cache)
7124 struct rb_node *dev_node;
7125 struct device_record *dev_rec;
7126 struct device_extent_record *dext_rec;
7130 dev_node = rb_first(dev_cache);
7132 dev_rec = container_of(dev_node, struct device_record, node);
7133 err = check_device_used(dev_rec, dev_extent_cache);
7137 dev_node = rb_next(dev_node);
7139 list_for_each_entry(dext_rec, &dev_extent_cache->no_device_orphans,
7142 "Device extent[%llu, %llu, %llu] didn't find its device.\n",
7143 dext_rec->objectid, dext_rec->offset, dext_rec->length);
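/*
 * Allocate a root_item_record for a tree root (including, for half-deleted
 * snapshots, the drop_level/drop_key taken from its drop_progress) and
 * append it to 'head' for later scanning.
 */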
7150 static int add_root_item_to_list(struct list_head *head,
7151 u64 objectid, u64 bytenr,
7152 u8 level, u8 drop_level,
7153 int level_size, struct btrfs_key *drop_key)
7156 struct root_item_record *ri_rec;
7157 ri_rec = malloc(sizeof(*ri_rec));
7160 ri_rec->bytenr = bytenr;
7161 ri_rec->objectid = objectid;
7162 ri_rec->level = level;
7163 ri_rec->level_size = level_size;
7164 ri_rec->drop_level = drop_level;
7166 memcpy(&ri_rec->drop_key, drop_key, sizeof(*drop_key));
7167 list_add_tail(&ri_rec->list, head);
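/*
 * Read each queued root, add it to the pending caches and keep calling
 * run_next_block() until every block reachable from it has been processed.
 * Roots that are being dropped, or every root when rebuilding the extent
 * tree, are drained one at a time; the rest are just queued here and
 * drained together afterwards for better readahead.
 */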
7172 static int deal_root_from_list(struct list_head *list,
7173 struct btrfs_trans_handle *trans,
7174 struct btrfs_root *root,
7175 struct block_info *bits,
7177 struct cache_tree *pending,
7178 struct cache_tree *seen,
7179 struct cache_tree *reada,
7180 struct cache_tree *nodes,
7181 struct cache_tree *extent_cache,
7182 struct cache_tree *chunk_cache,
7183 struct rb_root *dev_cache,
7184 struct block_group_tree *block_group_cache,
7185 struct device_extent_tree *dev_extent_cache)
7190 while (!list_empty(list)) {
7191 struct root_item_record *rec;
7192 struct extent_buffer *buf;
7193 rec = list_entry(list->next,
7194 struct root_item_record, list);
7196 buf = read_tree_block(root->fs_info->tree_root,
7197 rec->bytenr, rec->level_size, 0);
7198 if (!extent_buffer_uptodate(buf)) {
7199 free_extent_buffer(buf);
7203 add_root_to_pending(buf, extent_cache, pending,
7204 seen, nodes, rec->objectid);
7206 * To rebuild the extent tree, we need to deal with snapshots
7207 * one by one; otherwise we process nodes first, which
7208 * maximizes readahead.
7210 if (!init_extent_tree && !rec->drop_level)
7213 ret = run_next_block(trans, root, bits, bits_nr, &last,
7214 pending, seen, reada,
7215 nodes, extent_cache,
7216 chunk_cache, dev_cache,
7218 dev_extent_cache, rec);
7223 free_extent_buffer(buf);
7224 list_del(&rec->list);
7228 ret = run_next_block(trans, root, bits, bits_nr, &last,
7229 pending, seen, reada,
7230 nodes, extent_cache,
7231 chunk_cache, dev_cache,
7233 dev_extent_cache, NULL);
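/*
 * Top level of the extent/chunk pass: scan every tree root to fill the
 * chunk, block group, device, device extent and extent caches, then verify
 * extent refs, chunk refs and device usage, repairing as we go when
 * 'repair' is set.
 */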
7243 static int check_chunks_and_extents(struct btrfs_root *root)
7245 struct rb_root dev_cache;
7246 struct cache_tree chunk_cache;
7247 struct block_group_tree block_group_cache;
7248 struct device_extent_tree dev_extent_cache;
7249 struct cache_tree extent_cache;
7250 struct cache_tree seen;
7251 struct cache_tree pending;
7252 struct cache_tree reada;
7253 struct cache_tree nodes;
7254 struct cache_tree corrupt_blocks;
7255 struct btrfs_path path;
7256 struct btrfs_key key;
7257 struct btrfs_key found_key;
7259 struct block_info *bits;
7261 struct extent_buffer *leaf;
7262 struct btrfs_trans_handle *trans = NULL;
7264 struct btrfs_root_item ri;
7265 struct list_head dropping_trees;
7266 struct list_head normal_trees;
7267 struct btrfs_root *root1;
7272 dev_cache = RB_ROOT;
7273 cache_tree_init(&chunk_cache);
7274 block_group_tree_init(&block_group_cache);
7275 device_extent_tree_init(&dev_extent_cache);
7277 cache_tree_init(&extent_cache);
7278 cache_tree_init(&seen);
7279 cache_tree_init(&pending);
7280 cache_tree_init(&nodes);
7281 cache_tree_init(&reada);
7282 cache_tree_init(&corrupt_blocks);
7283 INIT_LIST_HEAD(&dropping_trees);
7284 INIT_LIST_HEAD(&normal_trees);
7287 trans = btrfs_start_transaction(root, 1);
7288 if (IS_ERR(trans)) {
7289 fprintf(stderr, "Error starting transaction\n");
7290 return PTR_ERR(trans);
7292 root->fs_info->fsck_extent_cache = &extent_cache;
7293 root->fs_info->free_extent_hook = free_extent_hook;
7294 root->fs_info->corrupt_blocks = &corrupt_blocks;
7298 bits = malloc(bits_nr * sizeof(struct block_info));
7305 root1 = root->fs_info->tree_root;
7306 level = btrfs_header_level(root1->node);
7307 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
7308 root1->node->start, level, 0,
7309 btrfs_level_size(root1, level), NULL);
7312 root1 = root->fs_info->chunk_root;
7313 level = btrfs_header_level(root1->node);
7314 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
7315 root1->node->start, level, 0,
7316 btrfs_level_size(root1, level), NULL);
7319 btrfs_init_path(&path);
7322 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
7323 ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
7328 leaf = path.nodes[0];
7329 slot = path.slots[0];
7330 if (slot >= btrfs_header_nritems(path.nodes[0])) {
7331 ret = btrfs_next_leaf(root, &path);
7334 leaf = path.nodes[0];
7335 slot = path.slots[0];
7337 btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
7338 if (btrfs_key_type(&found_key) == BTRFS_ROOT_ITEM_KEY) {
7339 unsigned long offset;
7341 offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
7342 read_extent_buffer(leaf, &ri, offset, sizeof(ri));
7343 if (btrfs_disk_key_objectid(&ri.drop_progress) == 0) {
7344 level = btrfs_root_level(&ri);
7345 level_size = btrfs_level_size(root, level);
7346 ret = add_root_item_to_list(&normal_trees,
7348 btrfs_root_bytenr(&ri), level,
7349 0, level_size, NULL);
7353 level = btrfs_root_level(&ri);
7354 level_size = btrfs_level_size(root, level);
7355 objectid = found_key.objectid;
7356 btrfs_disk_key_to_cpu(&found_key,
7358 ret = add_root_item_to_list(&dropping_trees,
7360 btrfs_root_bytenr(&ri),
7361 level, ri.drop_level,
7362 level_size, &found_key);
7369 btrfs_release_path(&path);
7370 ret = deal_root_from_list(&normal_trees, trans, root,
7371 bits, bits_nr, &pending, &seen,
7372 &reada, &nodes, &extent_cache,
7373 &chunk_cache, &dev_cache, &block_group_cache,
7377 ret = deal_root_from_list(&dropping_trees, trans, root,
7378 bits, bits_nr, &pending, &seen,
7379 &reada, &nodes, &extent_cache,
7380 &chunk_cache, &dev_cache, &block_group_cache,
7385 ret = check_extent_refs(trans, root, &extent_cache);
7386 if (ret == -EAGAIN) {
7387 ret = btrfs_commit_transaction(trans, root);
7391 trans = btrfs_start_transaction(root, 1);
7392 if (IS_ERR(trans)) {
7393 ret = PTR_ERR(trans);
7397 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
7398 free_extent_cache_tree(&seen);
7399 free_extent_cache_tree(&pending);
7400 free_extent_cache_tree(&reada);
7401 free_extent_cache_tree(&nodes);
7402 free_chunk_cache_tree(&chunk_cache);
7403 free_block_group_tree(&block_group_cache);
7404 free_device_cache_tree(&dev_cache);
7405 free_device_extent_tree(&dev_extent_cache);
7406 free_extent_record_cache(root->fs_info, &extent_cache);
7410 err = check_chunks(&chunk_cache, &block_group_cache,
7411 &dev_extent_cache, NULL, NULL, NULL, 0);
7415 err = check_devices(&dev_cache, &dev_extent_cache);
7421 err = btrfs_commit_transaction(trans, root);
7426 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
7427 root->fs_info->fsck_extent_cache = NULL;
7428 root->fs_info->free_extent_hook = NULL;
7429 root->fs_info->corrupt_blocks = NULL;
7432 free_chunk_cache_tree(&chunk_cache);
7433 free_device_cache_tree(&dev_cache);
7434 free_block_group_tree(&block_group_cache);
7435 free_device_extent_tree(&dev_extent_cache);
7436 free_extent_cache_tree(&seen);
7437 free_extent_cache_tree(&pending);
7438 free_extent_cache_tree(&reada);
7439 free_extent_cache_tree(&nodes);
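/*
 * Replace the node of @root with a freshly initialized one (or reuse the
 * current node when @overwrite is set) and make the root item or the dirty
 * root list reflect the change.  Used when re-creating the extent, csum and
 * data reloc trees.
 */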
7443 static int btrfs_fsck_reinit_root(struct btrfs_trans_handle *trans,
7444 struct btrfs_root *root, int overwrite)
7446 struct extent_buffer *c;
7447 struct extent_buffer *old = root->node;
7450 struct btrfs_disk_key disk_key = {0,0,0};
7456 extent_buffer_get(c);
7459 c = btrfs_alloc_free_block(trans, root,
7460 btrfs_level_size(root, 0),
7461 root->root_key.objectid,
7462 &disk_key, level, 0, 0);
7465 extent_buffer_get(c);
7469 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
7470 btrfs_set_header_level(c, level);
7471 btrfs_set_header_bytenr(c, c->start);
7472 btrfs_set_header_generation(c, trans->transid);
7473 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
7474 btrfs_set_header_owner(c, root->root_key.objectid);
7476 write_extent_buffer(c, root->fs_info->fsid,
7477 btrfs_header_fsid(), BTRFS_FSID_SIZE);
7479 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
7480 btrfs_header_chunk_tree_uuid(c),
7483 btrfs_mark_buffer_dirty(c);
7485 * This can happen in the following cases:
7487 * 1. We overwrote the previous root.
7489 * 2. We reinitialized the reloc data root; we skipped pinning
7490 * down the reloc data tree earlier, which means the same
7491 * block bytenr can be allocated again here.

7493 if (old->start == c->start) {
7494 btrfs_set_root_generation(&root->root_item,
7496 root->root_item.level = btrfs_header_level(root->node);
7497 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7498 &root->root_key, &root->root_item);
7500 free_extent_buffer(c);
7504 free_extent_buffer(old);
7506 add_root_to_dirty_list(root);
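/*
 * Recursively mark every block reachable from @eb as pinned so the
 * allocator won't hand those bytes out while the extent tree is rebuilt.
 * When walking the tree root (@tree_root set), also descend into the roots
 * referenced by its ROOT_ITEMs, skipping the extent root and reloc roots.
 */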
7510 static int pin_down_tree_blocks(struct btrfs_fs_info *fs_info,
7511 struct extent_buffer *eb, int tree_root)
7513 struct extent_buffer *tmp;
7514 struct btrfs_root_item *ri;
7515 struct btrfs_key key;
7518 int level = btrfs_header_level(eb);
7524 * If we have pinned this block before, don't pin it again.
7525 * This not only avoids an infinite loop on a broken filesystem
7526 * but also gives us a nice speedup.
7528 if (test_range_bit(&fs_info->pinned_extents, eb->start,
7529 eb->start + eb->len - 1, EXTENT_DIRTY, 0))
7532 btrfs_pin_extent(fs_info, eb->start, eb->len);
7534 leafsize = btrfs_super_leafsize(fs_info->super_copy);
7535 nritems = btrfs_header_nritems(eb);
7536 for (i = 0; i < nritems; i++) {
7538 btrfs_item_key_to_cpu(eb, &key, i);
7539 if (key.type != BTRFS_ROOT_ITEM_KEY)
7541 /* Skip the extent root and reloc roots */
7542 if (key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
7543 key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
7544 key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
7546 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
7547 bytenr = btrfs_disk_root_bytenr(eb, ri);
7550 * If at any point we start needing the real root we
7551 * will have to build a stump root for the root we are
7552 * in, but for now this doesn't actually use the root so
7553 * just pass in extent_root.
7555 tmp = read_tree_block(fs_info->extent_root, bytenr,
7558 fprintf(stderr, "Error reading root block\n");
7561 ret = pin_down_tree_blocks(fs_info, tmp, 0);
7562 free_extent_buffer(tmp);
7566 bytenr = btrfs_node_blockptr(eb, i);
7568 /* If we aren't the tree root don't read the block */
7569 if (level == 1 && !tree_root) {
7570 btrfs_pin_extent(fs_info, bytenr, leafsize);
7574 tmp = read_tree_block(fs_info->extent_root, bytenr,
7577 fprintf(stderr, "Error reading tree block\n");
7580 ret = pin_down_tree_blocks(fs_info, tmp, tree_root);
7581 free_extent_buffer(tmp);
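/* Pin down all metadata reachable from the chunk root and the tree root. */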
7590 static int pin_metadata_blocks(struct btrfs_fs_info *fs_info)
7594 ret = pin_down_tree_blocks(fs_info, fs_info->chunk_root->node, 0);
7598 return pin_down_tree_blocks(fs_info, fs_info->tree_root->node, 1);
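/*
 * Rebuild the in-memory block groups straight from the chunk tree, clearing
 * the stale avail_*_alloc_bits first and marking each chunk's range dirty in
 * the free space cache so it can be allocated from again.
 */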
7601 static int reset_block_groups(struct btrfs_fs_info *fs_info)
7603 struct btrfs_block_group_cache *cache;
7604 struct btrfs_path *path;
7605 struct extent_buffer *leaf;
7606 struct btrfs_chunk *chunk;
7607 struct btrfs_key key;
7611 path = btrfs_alloc_path();
7616 key.type = BTRFS_CHUNK_ITEM_KEY;
7619 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
7621 btrfs_free_path(path);
7626 * We do this in case the block groups were screwed up and had alloc
7627 * bits that aren't actually set on the chunks.  This happens every
7628 * time with restored images and can happen on real filesystems too.
7630 fs_info->avail_data_alloc_bits = 0;
7631 fs_info->avail_metadata_alloc_bits = 0;
7632 fs_info->avail_system_alloc_bits = 0;
7634 /* First we need to create the in-memory block groups */
7636 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7637 ret = btrfs_next_leaf(fs_info->chunk_root, path);
7639 btrfs_free_path(path);
7647 leaf = path->nodes[0];
7648 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7649 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7654 chunk = btrfs_item_ptr(leaf, path->slots[0],
7655 struct btrfs_chunk);
7656 btrfs_add_block_group(fs_info, 0,
7657 btrfs_chunk_type(leaf, chunk),
7658 key.objectid, key.offset,
7659 btrfs_chunk_length(leaf, chunk));
7660 set_extent_dirty(&fs_info->free_space_cache, key.offset,
7661 key.offset + btrfs_chunk_length(leaf, chunk),
7667 cache = btrfs_lookup_first_block_group(fs_info, start);
7671 start = cache->key.objectid + cache->key.offset;
7674 btrfs_free_path(path);
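/*
 * Wipe any half-finished balance: delete the balance item and all tree reloc
 * root items from the root tree, then reinitialize the data reloc tree so it
 * no longer points into the state we are about to throw away.
 */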
7678 static int reset_balance(struct btrfs_trans_handle *trans,
7679 struct btrfs_fs_info *fs_info)
7681 struct btrfs_root *root = fs_info->tree_root;
7682 struct btrfs_path *path;
7683 struct extent_buffer *leaf;
7684 struct btrfs_key key;
7685 int del_slot, del_nr = 0;
7689 path = btrfs_alloc_path();
7693 key.objectid = BTRFS_BALANCE_OBJECTID;
7694 key.type = BTRFS_BALANCE_ITEM_KEY;
7697 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7702 goto reinit_data_reloc;
7707 ret = btrfs_del_item(trans, root, path);
7710 btrfs_release_path(path);
7712 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7713 key.type = BTRFS_ROOT_ITEM_KEY;
7716 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7720 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7725 ret = btrfs_del_items(trans, root, path,
7732 btrfs_release_path(path);
7735 ret = btrfs_search_slot(trans, root, &key, path,
7742 leaf = path->nodes[0];
7743 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7744 if (key.objectid > BTRFS_TREE_RELOC_OBJECTID)
7746 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7751 del_slot = path->slots[0];
7760 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
7764 btrfs_release_path(path);
7767 key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7768 key.type = BTRFS_ROOT_ITEM_KEY;
7769 key.offset = (u64)-1;
7770 root = btrfs_read_fs_root(fs_info, &key);
7772 fprintf(stderr, "Error reading data reloc tree\n");
7773 ret = PTR_ERR(root);
7776 record_root_in_trans(trans, root);
7777 ret = btrfs_fsck_reinit_root(trans, root, 0);
7780 ret = btrfs_make_root_dir(trans, root, BTRFS_FIRST_FREE_OBJECTID);
7782 btrfs_free_path(path);
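/*
 * Rebuild the extent tree from scratch: pin all metadata currently in use,
 * recreate the in-memory block groups from the chunk tree, reinit the extent
 * root, insert a block group item for every block group and finally reset
 * any pending balance.
 */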
7786 static int reinit_extent_tree(struct btrfs_trans_handle *trans,
7787 struct btrfs_fs_info *fs_info)
7793 * The only reason we don't support this is that right now we only
7794 * walk the trees we find and pin down their bytes; we don't look at
7795 * any of the leaves.  To handle mixed groups we would also have to
7796 * check the leaves of every fs root and pin down the bytes of any
7797 * file extents we find.  Not hard, but why do it if we don't have to?
7799 if (btrfs_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)) {
7800 fprintf(stderr, "We don't support re-initing the extent tree "
7801 "for mixed block groups yet, please notify a btrfs "
7802 "developer you want to do this so they can add this "
7803 "functionality.\n");
7808 * First we need to walk all of the trees except the extent tree and pin
7809 * down the bytes that are in use so we don't overwrite any existing metadata.
7812 ret = pin_metadata_blocks(fs_info);
7814 fprintf(stderr, "error pinning down used bytes\n");
7819 * Need to drop all the block groups since we're going to recreate them all.
7822 btrfs_free_block_groups(fs_info);
7823 ret = reset_block_groups(fs_info);
7825 fprintf(stderr, "error resetting the block groups\n");
7829 /* Ok we can allocate now, reinit the extent root */
7830 ret = btrfs_fsck_reinit_root(trans, fs_info->extent_root, 0);
7832 fprintf(stderr, "extent root initialization failed\n");
7834 * When the transaction code is updated we should end the
7835 * transaction, but for now progs only knows about commit so
7836 * just return an error.
7842 * Now we have all the in-memory block groups set up so we can make
7843 * allocations properly, and the metadata we care about is safe since we
7844 * pinned all of it above.
7847 struct btrfs_block_group_cache *cache;
7849 cache = btrfs_lookup_first_block_group(fs_info, start);
7852 start = cache->key.objectid + cache->key.offset;
7853 ret = btrfs_insert_item(trans, fs_info->extent_root,
7854 &cache->key, &cache->item,
7855 sizeof(cache->item));
7857 fprintf(stderr, "Error adding block group\n");
7860 btrfs_extent_post_op(trans, fs_info->extent_root);
7863 ret = reset_balance(trans, fs_info);
7865 fprintf(stderr, "error resetting the pending balance\n");
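/*
 * Force a COW of @eb: look up its owner root and do a cowing search down to
 * the buffer's level so it gets rewritten with the current generation.
 */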
7870 static int recow_extent_buffer(struct btrfs_root *root, struct extent_buffer *eb)
7872 struct btrfs_path *path;
7873 struct btrfs_trans_handle *trans;
7874 struct btrfs_key key;
7877 printf("Recowing metadata block %llu\n", eb->start);
7878 key.objectid = btrfs_header_owner(eb);
7879 key.type = BTRFS_ROOT_ITEM_KEY;
7880 key.offset = (u64)-1;
7882 root = btrfs_read_fs_root(root->fs_info, &key);
7884 fprintf(stderr, "Couldn't find owner root %llu\n",
7886 return PTR_ERR(root);
7889 path = btrfs_alloc_path();
7893 trans = btrfs_start_transaction(root, 1);
7894 if (IS_ERR(trans)) {
7895 btrfs_free_path(path);
7896 return PTR_ERR(trans);
7899 path->lowest_level = btrfs_header_level(eb);
7900 if (path->lowest_level)
7901 btrfs_node_key_to_cpu(eb, &key, 0);
7903 btrfs_item_key_to_cpu(eb, &key, 0);
7905 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
7906 btrfs_commit_transaction(trans, root);
7907 btrfs_free_path(path);
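/*
 * Remove a key that was recorded on the delete_items list: read the root it
 * belongs to and delete the item in a transaction of its own.
 */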
7911 static int delete_bad_item(struct btrfs_root *root, struct bad_item *bad)
7913 struct btrfs_path *path;
7914 struct btrfs_trans_handle *trans;
7915 struct btrfs_key key;
7918 printf("Deleting bad item [%llu,%u,%llu]\n", bad->key.objectid,
7919 bad->key.type, bad->key.offset);
7920 key.objectid = bad->root_id;
7921 key.type = BTRFS_ROOT_ITEM_KEY;
7922 key.offset = (u64)-1;
7924 root = btrfs_read_fs_root(root->fs_info, &key);
7926 fprintf(stderr, "Couldn't find owner root %llu\n",
7928 return PTR_ERR(root);
7931 path = btrfs_alloc_path();
7935 trans = btrfs_start_transaction(root, 1);
7936 if (IS_ERR(trans)) {
7937 btrfs_free_path(path);
7938 return PTR_ERR(trans);
7941 ret = btrfs_search_slot(trans, root, &bad->key, path, -1, 1);
7947 ret = btrfs_del_item(trans, root, path);
7949 btrfs_commit_transaction(trans, root);
7950 btrfs_free_path(path);
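/* Clear the log root pointer and level in the superblock and commit. */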
7954 static int zero_log_tree(struct btrfs_root *root)
7956 struct btrfs_trans_handle *trans;
7959 trans = btrfs_start_transaction(root, 1);
7960 if (IS_ERR(trans)) {
7961 ret = PTR_ERR(trans);
7964 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
7965 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
7966 ret = btrfs_commit_transaction(trans, root);
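/*
 * Read the data at [start, start + len) one sector at a time and insert a
 * checksum item for each sector into the csum tree.
 */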
7970 static int populate_csum(struct btrfs_trans_handle *trans,
7971 struct btrfs_root *csum_root, char *buf, u64 start,
7978 while (offset < len) {
7979 sectorsize = csum_root->sectorsize;
7980 ret = read_extent_data(csum_root, buf, start + offset,
7984 ret = btrfs_csum_file_block(trans, csum_root, start + len,
7985 start + offset, buf, sectorsize);
7988 offset += sectorsize;
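/*
 * Walk the extent tree and regenerate checksums for every data extent found,
 * one call to populate_csum() per extent.  Used after the csum root has been
 * reinitialized with --init-csum-tree.
 */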
7993 static int fill_csum_tree(struct btrfs_trans_handle *trans,
7994 struct btrfs_root *csum_root)
7996 struct btrfs_root *extent_root = csum_root->fs_info->extent_root;
7997 struct btrfs_path *path;
7998 struct btrfs_extent_item *ei;
7999 struct extent_buffer *leaf;
8001 struct btrfs_key key;
8004 path = btrfs_alloc_path();
8009 key.type = BTRFS_EXTENT_ITEM_KEY;
8012 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
8014 btrfs_free_path(path);
8018 buf = malloc(csum_root->sectorsize);
8020 btrfs_free_path(path);
8025 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8026 ret = btrfs_next_leaf(extent_root, path);
8034 leaf = path->nodes[0];
8036 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
8037 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
8042 ei = btrfs_item_ptr(leaf, path->slots[0],
8043 struct btrfs_extent_item);
8044 if (!(btrfs_extent_flags(leaf, ei) &
8045 BTRFS_EXTENT_FLAG_DATA)) {
8050 ret = populate_csum(trans, csum_root, buf, key.objectid,
8057 btrfs_free_path(path);
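/*
 * Per-root summary of the highest tree block found in the extent tree, used
 * to detect root items that point at a stale root node (see
 * repair_root_items() below).
 */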
8062 struct root_item_info {
8063 /* level of the root */
8065 /* number of nodes at this level, must be 1 for a root */
8069 struct cache_extent cache_extent;
8072 static struct cache_tree *roots_info_cache = NULL;
8074 static void free_roots_info_cache(void)
8076 if (!roots_info_cache)
8079 while (!cache_tree_empty(roots_info_cache)) {
8080 struct cache_extent *entry;
8081 struct root_item_info *rii;
8083 entry = first_cache_extent(roots_info_cache);
8086 remove_cache_extent(roots_info_cache, entry);
8087 rii = container_of(entry, struct root_item_info, cache_extent);
8091 free(roots_info_cache);
8092 roots_info_cache = NULL;
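/*
 * Scan the tree block items in the extent tree and record, for every root
 * referenced by a TREE_BLOCK_REF, the highest-level node seen along with its
 * bytenr and generation.
 */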
8095 static int build_roots_info_cache(struct btrfs_fs_info *info)
8098 struct btrfs_key key;
8099 struct extent_buffer *leaf;
8100 struct btrfs_path *path;
8102 if (!roots_info_cache) {
8103 roots_info_cache = malloc(sizeof(*roots_info_cache));
8104 if (!roots_info_cache)
8106 cache_tree_init(roots_info_cache);
8109 path = btrfs_alloc_path();
8114 key.type = BTRFS_EXTENT_ITEM_KEY;
8117 ret = btrfs_search_slot(NULL, info->extent_root, &key, path, 0, 0);
8120 leaf = path->nodes[0];
8123 struct btrfs_key found_key;
8124 struct btrfs_extent_item *ei;
8125 struct btrfs_extent_inline_ref *iref;
8126 int slot = path->slots[0];
8131 struct cache_extent *entry;
8132 struct root_item_info *rii;
8134 if (slot >= btrfs_header_nritems(leaf)) {
8135 ret = btrfs_next_leaf(info->extent_root, path);
8142 leaf = path->nodes[0];
8143 slot = path->slots[0];
8146 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8148 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
8149 found_key.type != BTRFS_METADATA_ITEM_KEY)
8152 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
8153 flags = btrfs_extent_flags(leaf, ei);
8155 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
8156 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
8159 if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
8160 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
8161 level = found_key.offset;
8163 struct btrfs_tree_block_info *info;
8165 info = (struct btrfs_tree_block_info *)(ei + 1);
8166 iref = (struct btrfs_extent_inline_ref *)(info + 1);
8167 level = btrfs_tree_block_level(leaf, info);
8171 * For a root extent, it must be of the following type and it
8172 * must be the first (and only) iref in the item.
8174 type = btrfs_extent_inline_ref_type(leaf, iref);
8175 if (type != BTRFS_TREE_BLOCK_REF_KEY)
8178 root_id = btrfs_extent_inline_ref_offset(leaf, iref);
8179 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
8181 rii = malloc(sizeof(struct root_item_info));
8186 rii->cache_extent.start = root_id;
8187 rii->cache_extent.size = 1;
8188 rii->level = (u8)-1;
8189 entry = &rii->cache_extent;
8190 ret = insert_cache_extent(roots_info_cache, entry);
8193 rii = container_of(entry, struct root_item_info,
8197 ASSERT(rii->cache_extent.start == root_id);
8198 ASSERT(rii->cache_extent.size == 1);
8200 if (level > rii->level || rii->level == (u8)-1) {
8202 rii->bytenr = found_key.objectid;
8203 rii->gen = btrfs_extent_generation(leaf, ei);
8204 rii->node_count = 1;
8205 } else if (level == rii->level) {
8213 btrfs_free_path(path);
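/*
 * Compare the root item at @path against what build_roots_info_cache() found
 * in the extent tree.  If the bytenr, level or generation disagree, report it
 * and, unless @read_only_mode is set, rewrite the root item in place.
 */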
8218 static int maybe_repair_root_item(struct btrfs_fs_info *info,
8219 struct btrfs_path *path,
8220 const struct btrfs_key *root_key,
8221 const int read_only_mode)
8223 const u64 root_id = root_key->objectid;
8224 struct cache_extent *entry;
8225 struct root_item_info *rii;
8226 struct btrfs_root_item ri;
8227 unsigned long offset;
8229 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
8232 "Error: could not find extent items for root %llu\n",
8233 root_key->objectid);
8237 rii = container_of(entry, struct root_item_info, cache_extent);
8238 ASSERT(rii->cache_extent.start == root_id);
8239 ASSERT(rii->cache_extent.size == 1);
8241 if (rii->node_count != 1) {
8243 "Error: could not find btree root extent for root %llu\n",
8248 offset = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
8249 read_extent_buffer(path->nodes[0], &ri, offset, sizeof(ri));
8251 if (btrfs_root_bytenr(&ri) != rii->bytenr ||
8252 btrfs_root_level(&ri) != rii->level ||
8253 btrfs_root_generation(&ri) != rii->gen) {
8256 * If we're in repair mode but our caller told us to not update
8257 * the root item, i.e. just check if it needs to be updated, don't
8258 * print this message, since the caller will call us again shortly
8259 * for the same root item without read only mode (the caller will
8260 * open a transaction first).
8262 if (!(read_only_mode && repair))
8264 "%sroot item for root %llu,"
8265 " current bytenr %llu, current gen %llu, current level %u,"
8266 " new bytenr %llu, new gen %llu, new level %u\n",
8267 (read_only_mode ? "" : "fixing "),
8269 btrfs_root_bytenr(&ri), btrfs_root_generation(&ri),
8270 btrfs_root_level(&ri),
8271 rii->bytenr, rii->gen, rii->level);
8273 if (btrfs_root_generation(&ri) > rii->gen) {
8275 "root %llu has a root item with a more recent gen (%llu) compared to the found root node (%llu)\n",
8276 root_id, btrfs_root_generation(&ri), rii->gen);
8280 if (!read_only_mode) {
8281 btrfs_set_root_bytenr(&ri, rii->bytenr);
8282 btrfs_set_root_level(&ri, rii->level);
8283 btrfs_set_root_generation(&ri, rii->gen);
8284 write_extent_buffer(path->nodes[0], &ri,
8285 offset, sizeof(ri));
8295 * A regression introduced in the 3.17 kernel (more specifically in 3.17-rc2)
8296 * caused read-only snapshots to be corrupted if they were created at a moment
8297 * when the source subvolume/snapshot had orphan items. The issue was that the
8298 * on-disk root items became incorrect, referring to the pre-orphan-cleanup root
8299 * node instead of the post-orphan-cleanup root node.
8300 * This function, and its callees, detect and fix those cases. Even though the
8301 * regression was for read-only snapshots, this function applies to any
8302 * snapshot/subvolume root.
8303 * It must be run before any other repair code; otherwise, other repair code
8304 * could delete or modify backrefs in the extent tree, for example, which
8305 * would leave the fs inconsistent after the root items are repaired.
8307 static int repair_root_items(struct btrfs_fs_info *info)
8309 struct btrfs_path *path = NULL;
8310 struct btrfs_key key;
8311 struct extent_buffer *leaf;
8312 struct btrfs_trans_handle *trans = NULL;
8317 ret = build_roots_info_cache(info);
8321 path = btrfs_alloc_path();
8327 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
8328 key.type = BTRFS_ROOT_ITEM_KEY;
8333 * Avoid opening and committing transactions if a leaf doesn't have
8334 * any root items that need to be fixed, so that we avoid rotating
8335 * backup roots unnecessarily.
8338 trans = btrfs_start_transaction(info->tree_root, 1);
8339 if (IS_ERR(trans)) {
8340 ret = PTR_ERR(trans);
8345 ret = btrfs_search_slot(trans, info->tree_root, &key, path,
8349 leaf = path->nodes[0];
8352 struct btrfs_key found_key;
8354 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
8355 int no_more_keys = find_next_key(path, &key);
8357 btrfs_release_path(path);
8359 ret = btrfs_commit_transaction(trans,
8371 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8373 if (found_key.type != BTRFS_ROOT_ITEM_KEY)
8376 ret = maybe_repair_root_item(info, path, &found_key,
8381 if (!trans && repair) {
8384 btrfs_release_path(path);
8394 free_roots_info_cache();
8396 btrfs_free_path(path);
8403 const char * const cmd_check_usage[] = {
8404 "btrfs check [options] <device>",
8405 "Check an unmounted btrfs filesystem.",
8407 "-s|--super <superblock> use this superblock copy",
8408 "-b|--backup use the backup root copy",
8409 "--repair try to repair the filesystem",
8410 "--init-csum-tree create a new CRC tree",
8411 "--init-extent-tree create a new extent tree",
8412 "--check-data-csum verify checkums of data blocks",
8413 "--qgroup-report print a report on qgroup consistency",
8414 "--subvol-extents <subvolid> print subvolume extents and sharing state",
8415 "--tree-root <bytenr> use the given bytenr for the tree root",
8419 int cmd_check(int argc, char **argv)
8421 struct cache_tree root_cache;
8422 struct btrfs_root *root;
8423 struct btrfs_fs_info *info;
8426 u64 tree_root_bytenr = 0;
8427 char uuidbuf[BTRFS_UUID_UNPARSED_SIZE];
8430 int init_csum_tree = 0;
8432 int qgroup_report = 0;
8433 enum btrfs_open_ctree_flags ctree_flags = OPEN_CTREE_EXCLUSIVE;
8437 int option_index = 0;
8438 enum { OPT_REPAIR = 257, OPT_INIT_CSUM, OPT_INIT_EXTENT,
8439 OPT_CHECK_CSUM, OPT_READONLY };
8440 static const struct option long_options[] = {
8441 { "super", 1, NULL, 's' },
8442 { "repair", 0, NULL, OPT_REPAIR },
8443 { "readonly", 0, NULL, OPT_READONLY },
8444 { "init-csum-tree", 0, NULL, OPT_INIT_CSUM },
8445 { "init-extent-tree", 0, NULL, OPT_INIT_EXTENT },
8446 { "check-data-csum", 0, NULL, OPT_CHECK_CSUM },
8447 { "backup", 0, NULL, 'b' },
8448 { "subvol-extents", 1, NULL, 'E' },
8449 { "qgroup-report", 0, NULL, 'Q' },
8450 { "tree-root", 1, NULL, 'r' },
8454 c = getopt_long(argc, argv, "as:br:", long_options,
8459 case 'a': /* ignored */ break;
8461 ctree_flags |= OPEN_CTREE_BACKUP_ROOT;
8464 num = arg_strtou64(optarg);
8465 if (num >= BTRFS_SUPER_MIRROR_MAX) {
8467 "ERROR: super mirror should be less than: %d\n",
8468 BTRFS_SUPER_MIRROR_MAX);
8471 bytenr = btrfs_sb_offset(((int)num));
8472 printf("using SB copy %llu, bytenr %llu\n", num,
8473 (unsigned long long)bytenr);
8479 subvolid = arg_strtou64(optarg);
8482 tree_root_bytenr = arg_strtou64(optarg);
8486 usage(cmd_check_usage);
8488 printf("enabling repair mode\n");
8490 ctree_flags |= OPEN_CTREE_WRITES;
8496 printf("Creating a new CRC tree\n");
8499 ctree_flags |= OPEN_CTREE_WRITES;
8501 case OPT_INIT_EXTENT:
8502 init_extent_tree = 1;
8503 ctree_flags |= (OPEN_CTREE_WRITES |
8504 OPEN_CTREE_NO_BLOCK_GROUPS);
8507 case OPT_CHECK_CSUM:
8508 check_data_csum = 1;
8512 argc = argc - optind;
8514 if (check_argc_exact(argc, 1))
8515 usage(cmd_check_usage);
8517 /* This check is the only reason for --readonly to exist */
8518 if (readonly && repair) {
8519 fprintf(stderr, "Repair options are not compatible with --readonly\n");
8524 cache_tree_init(&root_cache);
8526 if((ret = check_mounted(argv[optind])) < 0) {
8527 fprintf(stderr, "Could not check mount status: %s\n", strerror(-ret));
8530 fprintf(stderr, "%s is currently mounted. Aborting.\n", argv[optind]);
8535 /* only allow partial opening under repair mode */
8537 ctree_flags |= OPEN_CTREE_PARTIAL;
8539 info = open_ctree_fs_info(argv[optind], bytenr, tree_root_bytenr,
8542 fprintf(stderr, "Couldn't open file system\n");
8547 root = info->fs_root;
8550 * Repair mode will force us to commit a transaction, which
8551 * would make loading the log tree fail at mount time.
8553 if (repair && btrfs_super_log_root(info->super_copy)) {
8554 ret = ask_user("repair mode will force us to clear out the log tree, are you sure?");
8559 ret = zero_log_tree(root);
8561 fprintf(stderr, "fail to zero log tree\n");
8566 uuid_unparse(info->super_copy->fsid, uuidbuf);
8567 if (qgroup_report) {
8568 printf("Print quota groups for %s\nUUID: %s\n", argv[optind],
8570 ret = qgroup_verify_all(info);
8572 print_qgroup_report(1);
8576 printf("Print extent state for subvolume %llu on %s\nUUID: %s\n",
8577 subvolid, argv[optind], uuidbuf);
8578 ret = print_extent_state(info, subvolid);
8581 printf("Checking filesystem on %s\nUUID: %s\n", argv[optind], uuidbuf);
8583 if (!extent_buffer_uptodate(info->tree_root->node) ||
8584 !extent_buffer_uptodate(info->dev_root->node) ||
8585 !extent_buffer_uptodate(info->chunk_root->node)) {
8586 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
8591 if (init_extent_tree || init_csum_tree) {
8592 struct btrfs_trans_handle *trans;
8594 trans = btrfs_start_transaction(info->extent_root, 0);
8595 if (IS_ERR(trans)) {
8596 fprintf(stderr, "Error starting transaction\n");
8597 ret = PTR_ERR(trans);
8601 if (init_extent_tree) {
8602 printf("Creating a new extent tree\n");
8603 ret = reinit_extent_tree(trans, info);
8608 if (init_csum_tree) {
8609 fprintf(stderr, "Reinit crc root\n");
8610 ret = btrfs_fsck_reinit_root(trans, info->csum_root, 0);
8612 fprintf(stderr, "crc root initialization failed\n");
8617 ret = fill_csum_tree(trans, info->csum_root);
8619 fprintf(stderr, "crc refilling failed\n");
8624 * Ok now we commit and run the normal fsck, which will add
8625 * extent entries for all of the items it finds.
8627 ret = btrfs_commit_transaction(trans, info->extent_root);
8631 if (!extent_buffer_uptodate(info->extent_root->node)) {
8632 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
8636 if (!extent_buffer_uptodate(info->csum_root->node)) {
8637 fprintf(stderr, "Checksum root corrupted, rerun with --init-csum-tree option\n");
8642 fprintf(stderr, "checking extents\n");
8643 ret = check_chunks_and_extents(root);
8645 fprintf(stderr, "Errors found in extent allocation tree or chunk allocation\n");
8647 ret = repair_root_items(info);
8651 fprintf(stderr, "Fixed %d roots.\n", ret);
8653 } else if (ret > 0) {
8655 "Found %d roots with an outdated root item.\n",
8658 "Please run a filesystem check with the option --repair to fix them.\n");
8663 fprintf(stderr, "checking free space cache\n");
8664 ret = check_space_cache(root);
8669 * We used to have to have hole extents in between the real extents,
8670 * so if this incompat flag is not set we need to make sure there are
8671 * no gaps in an inode's file extents; otherwise gaps are expected and
8672 * we can simply ignore them.
8674 no_holes = btrfs_fs_incompat(root->fs_info,
8675 BTRFS_FEATURE_INCOMPAT_NO_HOLES);
8676 fprintf(stderr, "checking fs roots\n");
8677 ret = check_fs_roots(root, &root_cache);
8681 fprintf(stderr, "checking csums\n");
8682 ret = check_csums(root);
8686 fprintf(stderr, "checking root refs\n");
8687 ret = check_root_refs(root, &root_cache);
8691 while (repair && !list_empty(&root->fs_info->recow_ebs)) {
8692 struct extent_buffer *eb;
8694 eb = list_first_entry(&root->fs_info->recow_ebs,
8695 struct extent_buffer, recow);
8696 list_del_init(&eb->recow);
8697 ret = recow_extent_buffer(root, eb);
8702 while (!list_empty(&delete_items)) {
8703 struct bad_item *bad;
8705 bad = list_first_entry(&delete_items, struct bad_item, list);
8706 list_del_init(&bad->list);
8708 ret = delete_bad_item(root, bad);
8712 if (info->quota_enabled) {
8714 fprintf(stderr, "checking quota groups\n");
8715 err = qgroup_verify_all(info);
8720 if (!list_empty(&root->fs_info->recow_ebs)) {
8721 fprintf(stderr, "Transid errors in file system\n");
8725 print_qgroup_report(0);
8726 if (found_old_backref) { /*
8727 * There was a disk format change while mixed
8728 * backrefs were in the testing tree. The old
8729 * format existed for about one week.
8731 printf("\n * Found old mixed backref format. "
8732 "The old format is not supported! *"
8733 "\n * Please mount the FS in readonly mode, "
8734 "backup data and re-format the FS. *\n\n");
8737 printf("found %llu bytes used err is %d\n",
8738 (unsigned long long)bytes_used, ret);
8739 printf("total csum bytes: %llu\n",(unsigned long long)total_csum_bytes);
8740 printf("total tree bytes: %llu\n",
8741 (unsigned long long)total_btree_bytes);
8742 printf("total fs tree bytes: %llu\n",
8743 (unsigned long long)total_fs_tree_bytes);
8744 printf("total extent tree bytes: %llu\n",
8745 (unsigned long long)total_extent_tree_bytes);
8746 printf("btree space waste bytes: %llu\n",
8747 (unsigned long long)btree_space_waste);
8748 printf("file data blocks allocated: %llu\n referenced %llu\n",
8749 (unsigned long long)data_bytes_allocated,
8750 (unsigned long long)data_bytes_referenced);
8751 printf("%s\n", PACKAGE_STRING);
8753 free_root_recs_tree(&root_cache);