2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
23 #include <sys/types.h>
27 #include <uuid/uuid.h>
32 #include "print-tree.h"
33 #include "transaction.h"
36 #include "free-space-cache.h"
38 #include "qgroup-verify.h"
39 #include "rbtree-utils.h"
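/*
 * Global statistics and repair-mode switches for btrfs check. The byte
 * counters below are accumulated while walking the trees and reported in
 * the final summary; repair, no_holes, init_extent_tree and check_data_csum
 * are set up front (from the command-line options and filesystem features)
 * and steer which checks and fixups are performed.
 */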
43 static u64 bytes_used = 0;
44 static u64 total_csum_bytes = 0;
45 static u64 total_btree_bytes = 0;
46 static u64 total_fs_tree_bytes = 0;
47 static u64 total_extent_tree_bytes = 0;
48 static u64 btree_space_waste = 0;
49 static u64 data_bytes_allocated = 0;
50 static u64 data_bytes_referenced = 0;
51 static int found_old_backref = 0;
52 static LIST_HEAD(duplicate_extents);
53 static LIST_HEAD(delete_items);
54 static int repair = 0;
55 static int no_holes = 0;
56 static int init_extent_tree = 0;
57 static int check_data_csum = 0;
59 struct extent_backref {
60 struct list_head list;
61 unsigned int is_data:1;
62 unsigned int found_extent_tree:1;
63 unsigned int full_backref:1;
64 unsigned int found_ref:1;
65 unsigned int broken:1;
69 struct extent_backref node;
84 * Much like data_backref, but with the undetermined members removed
85 * and changed to use a list_head.
86 * Stored in the root->orphan_data_extents list
88 struct orphan_data_extent {
89 struct list_head list;
98 struct extent_backref node;
105 struct extent_record {
106 struct list_head backrefs;
107 struct list_head dups;
108 struct list_head list;
109 struct cache_extent cache;
110 struct btrfs_disk_key parent_key;
115 u64 extent_item_refs;
117 u64 parent_generation;
121 unsigned int found_rec:1;
122 unsigned int content_checked:1;
123 unsigned int owner_ref_checked:1;
124 unsigned int is_root:1;
125 unsigned int metadata:1;
126 unsigned int flag_block_full_backref:1;
129 struct inode_backref {
130 struct list_head list;
131 unsigned int found_dir_item:1;
132 unsigned int found_dir_index:1;
133 unsigned int found_inode_ref:1;
134 unsigned int filetype:8;
136 unsigned int ref_type;
143 struct root_item_record {
144 struct list_head list;
150 struct btrfs_key drop_key;
153 #define REF_ERR_NO_DIR_ITEM (1 << 0)
154 #define REF_ERR_NO_DIR_INDEX (1 << 1)
155 #define REF_ERR_NO_INODE_REF (1 << 2)
156 #define REF_ERR_DUP_DIR_ITEM (1 << 3)
157 #define REF_ERR_DUP_DIR_INDEX (1 << 4)
158 #define REF_ERR_DUP_INODE_REF (1 << 5)
159 #define REF_ERR_INDEX_UNMATCH (1 << 6)
160 #define REF_ERR_FILETYPE_UNMATCH (1 << 7)
161 #define REF_ERR_NAME_TOO_LONG (1 << 8) // 100
162 #define REF_ERR_NO_ROOT_REF (1 << 9)
163 #define REF_ERR_NO_ROOT_BACKREF (1 << 10)
164 #define REF_ERR_DUP_ROOT_REF (1 << 11)
165 #define REF_ERR_DUP_ROOT_BACKREF (1 << 12)
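/*
 * The REF_ERR_* bits above are accumulated in the errors field of an
 * inode_backref or root_backref while the name/ref items are processed,
 * and are decoded into human-readable text by print_ref_error().
 */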
167 struct inode_record {
168 struct list_head backrefs;
169 unsigned int checked:1;
170 unsigned int merging:1;
171 unsigned int found_inode_item:1;
172 unsigned int found_dir_item:1;
173 unsigned int found_file_extent:1;
174 unsigned int found_csum_item:1;
175 unsigned int some_csum_missing:1;
176 unsigned int nodatasum:1;
189 u64 first_extent_gap;
194 #define I_ERR_NO_INODE_ITEM (1 << 0)
195 #define I_ERR_NO_ORPHAN_ITEM (1 << 1)
196 #define I_ERR_DUP_INODE_ITEM (1 << 2)
197 #define I_ERR_DUP_DIR_INDEX (1 << 3)
198 #define I_ERR_ODD_DIR_ITEM (1 << 4)
199 #define I_ERR_ODD_FILE_EXTENT (1 << 5)
200 #define I_ERR_BAD_FILE_EXTENT (1 << 6)
201 #define I_ERR_FILE_EXTENT_OVERLAP (1 << 7)
202 #define I_ERR_FILE_EXTENT_DISCOUNT (1 << 8) // 100
203 #define I_ERR_DIR_ISIZE_WRONG (1 << 9)
204 #define I_ERR_FILE_NBYTES_WRONG (1 << 10) // 400
205 #define I_ERR_ODD_CSUM_ITEM (1 << 11)
206 #define I_ERR_SOME_CSUM_MISSING (1 << 12)
207 #define I_ERR_LINK_COUNT_WRONG (1 << 13)
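/*
 * The I_ERR_* bits above are collected in inode_record->errors. A record
 * with any error bit set is never released by can_free_inode_rec(), so the
 * problem is either repaired later or reported by print_inode_error().
 */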
209 struct root_backref {
210 struct list_head list;
211 unsigned int found_dir_item:1;
212 unsigned int found_dir_index:1;
213 unsigned int found_back_ref:1;
214 unsigned int found_forward_ref:1;
215 unsigned int reachable:1;
225 struct list_head backrefs;
226 struct cache_extent cache;
227 unsigned int found_root_item:1;
233 struct cache_extent cache;
238 struct cache_extent cache;
239 struct cache_tree root_cache;
240 struct cache_tree inode_cache;
241 struct inode_record *current;
250 struct walk_control {
251 struct cache_tree shared;
252 struct shared_node *nodes[BTRFS_MAX_LEVEL];
258 struct btrfs_key key;
260 struct list_head list;
263 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info);
265 static void record_root_in_trans(struct btrfs_trans_handle *trans,
266 struct btrfs_root *root)
268 if (root->last_trans != trans->transid) {
269 root->track_dirty = 1;
270 root->last_trans = trans->transid;
271 root->commit_root = root->node;
272 extent_buffer_get(root->node);
276 static u8 imode_to_type(u32 imode)
279 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
280 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
281 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
282 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
283 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
284 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
285 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
286 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
289 return btrfs_type_by_mode[(imode & S_IFMT) >> S_SHIFT];
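/*
 * imode_to_type() maps the S_IFMT bits of an inode's mode to the BTRFS_FT_*
 * value stored in directory entries; it is used, for example, in
 * maybe_free_inode_rec() to verify that a backref's filetype matches the
 * owning inode.
 */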
293 static int device_record_compare(struct rb_node *node1, struct rb_node *node2)
295 struct device_record *rec1;
296 struct device_record *rec2;
298 rec1 = rb_entry(node1, struct device_record, node);
299 rec2 = rb_entry(node2, struct device_record, node);
300 if (rec1->devid > rec2->devid)
302 else if (rec1->devid < rec2->devid)
308 static struct inode_record *clone_inode_rec(struct inode_record *orig_rec)
310 struct inode_record *rec;
311 struct inode_backref *backref;
312 struct inode_backref *orig;
315 rec = malloc(sizeof(*rec));
316 memcpy(rec, orig_rec, sizeof(*rec));
318 INIT_LIST_HEAD(&rec->backrefs);
320 list_for_each_entry(orig, &orig_rec->backrefs, list) {
321 size = sizeof(*orig) + orig->namelen + 1;
322 backref = malloc(size);
323 memcpy(backref, orig, size);
324 list_add_tail(&backref->list, &rec->backrefs);
329 static void print_inode_error(struct btrfs_root *root, struct inode_record *rec)
331 u64 root_objectid = root->root_key.objectid;
332 int errors = rec->errors;
336 /* For reloc root errors, print the corresponding fs root objectid */
337 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
338 root_objectid = root->root_key.offset;
339 fprintf(stderr, "reloc");
341 fprintf(stderr, "root %llu inode %llu errors %x",
342 (unsigned long long) root_objectid,
343 (unsigned long long) rec->ino, rec->errors);
345 if (errors & I_ERR_NO_INODE_ITEM)
346 fprintf(stderr, ", no inode item");
347 if (errors & I_ERR_NO_ORPHAN_ITEM)
348 fprintf(stderr, ", no orphan item");
349 if (errors & I_ERR_DUP_INODE_ITEM)
350 fprintf(stderr, ", dup inode item");
351 if (errors & I_ERR_DUP_DIR_INDEX)
352 fprintf(stderr, ", dup dir index");
353 if (errors & I_ERR_ODD_DIR_ITEM)
354 fprintf(stderr, ", odd dir item");
355 if (errors & I_ERR_ODD_FILE_EXTENT)
356 fprintf(stderr, ", odd file extent");
357 if (errors & I_ERR_BAD_FILE_EXTENT)
358 fprintf(stderr, ", bad file extent");
359 if (errors & I_ERR_FILE_EXTENT_OVERLAP)
360 fprintf(stderr, ", file extent overlap");
361 if (errors & I_ERR_FILE_EXTENT_DISCOUNT)
362 fprintf(stderr, ", file extent discount");
363 if (errors & I_ERR_DIR_ISIZE_WRONG)
364 fprintf(stderr, ", dir isize wrong");
365 if (errors & I_ERR_FILE_NBYTES_WRONG)
366 fprintf(stderr, ", nbytes wrong");
367 if (errors & I_ERR_ODD_CSUM_ITEM)
368 fprintf(stderr, ", odd csum item");
369 if (errors & I_ERR_SOME_CSUM_MISSING)
370 fprintf(stderr, ", some csum missing");
371 if (errors & I_ERR_LINK_COUNT_WRONG)
372 fprintf(stderr, ", link count wrong");
373 fprintf(stderr, "\n");
376 static void print_ref_error(int errors)
378 if (errors & REF_ERR_NO_DIR_ITEM)
379 fprintf(stderr, ", no dir item");
380 if (errors & REF_ERR_NO_DIR_INDEX)
381 fprintf(stderr, ", no dir index");
382 if (errors & REF_ERR_NO_INODE_REF)
383 fprintf(stderr, ", no inode ref");
384 if (errors & REF_ERR_DUP_DIR_ITEM)
385 fprintf(stderr, ", dup dir item");
386 if (errors & REF_ERR_DUP_DIR_INDEX)
387 fprintf(stderr, ", dup dir index");
388 if (errors & REF_ERR_DUP_INODE_REF)
389 fprintf(stderr, ", dup inode ref");
390 if (errors & REF_ERR_INDEX_UNMATCH)
391 fprintf(stderr, ", index unmatch");
392 if (errors & REF_ERR_FILETYPE_UNMATCH)
393 fprintf(stderr, ", filetype unmatch");
394 if (errors & REF_ERR_NAME_TOO_LONG)
395 fprintf(stderr, ", name too long");
396 if (errors & REF_ERR_NO_ROOT_REF)
397 fprintf(stderr, ", no root ref");
398 if (errors & REF_ERR_NO_ROOT_BACKREF)
399 fprintf(stderr, ", no root backref");
400 if (errors & REF_ERR_DUP_ROOT_REF)
401 fprintf(stderr, ", dup root ref");
402 if (errors & REF_ERR_DUP_ROOT_BACKREF)
403 fprintf(stderr, ", dup root backref");
404 fprintf(stderr, "\n");
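/*
 * get_inode_rec() looks up the inode_record for 'ino' in the given cache
 * (keyed by inode number) and creates a fresh record if none exists. When
 * 'mod' is set and the record is shared (refs > 1), it is cloned first so
 * the caller gets a private copy it may modify.
 */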
407 static struct inode_record *get_inode_rec(struct cache_tree *inode_cache,
410 struct ptr_node *node;
411 struct cache_extent *cache;
412 struct inode_record *rec = NULL;
415 cache = lookup_cache_extent(inode_cache, ino, 1);
417 node = container_of(cache, struct ptr_node, cache);
419 if (mod && rec->refs > 1) {
420 node->data = clone_inode_rec(rec);
425 rec = calloc(1, sizeof(*rec));
427 rec->extent_start = (u64)-1;
428 rec->first_extent_gap = (u64)-1;
430 INIT_LIST_HEAD(&rec->backrefs);
432 node = malloc(sizeof(*node));
433 node->cache.start = ino;
434 node->cache.size = 1;
437 if (ino == BTRFS_FREE_INO_OBJECTID)
440 ret = insert_cache_extent(inode_cache, &node->cache);
446 static void free_inode_rec(struct inode_record *rec)
448 struct inode_backref *backref;
453 while (!list_empty(&rec->backrefs)) {
454 backref = list_entry(rec->backrefs.next,
455 struct inode_backref, list);
456 list_del(&backref->list);
462 static int can_free_inode_rec(struct inode_record *rec)
464 if (!rec->errors && rec->checked && rec->found_inode_item &&
465 rec->nlink == rec->found_link && list_empty(&rec->backrefs))
470 static void maybe_free_inode_rec(struct cache_tree *inode_cache,
471 struct inode_record *rec)
473 struct cache_extent *cache;
474 struct inode_backref *tmp, *backref;
475 struct ptr_node *node;
476 unsigned char filetype;
478 if (!rec->found_inode_item)
481 filetype = imode_to_type(rec->imode);
482 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
483 if (backref->found_dir_item && backref->found_dir_index) {
484 if (backref->filetype != filetype)
485 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
486 if (!backref->errors && backref->found_inode_ref) {
487 list_del(&backref->list);
493 if (!rec->checked || rec->merging)
496 if (S_ISDIR(rec->imode)) {
497 if (rec->found_size != rec->isize)
498 rec->errors |= I_ERR_DIR_ISIZE_WRONG;
499 if (rec->found_file_extent)
500 rec->errors |= I_ERR_ODD_FILE_EXTENT;
501 } else if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
502 if (rec->found_dir_item)
503 rec->errors |= I_ERR_ODD_DIR_ITEM;
504 if (rec->found_size != rec->nbytes)
505 rec->errors |= I_ERR_FILE_NBYTES_WRONG;
506 if (rec->extent_start == (u64)-1 || rec->extent_start > 0)
507 rec->first_extent_gap = 0;
508 if (rec->nlink > 0 && !no_holes &&
509 (rec->extent_end < rec->isize ||
510 rec->first_extent_gap < rec->isize))
511 rec->errors |= I_ERR_FILE_EXTENT_DISCOUNT;
514 if (S_ISREG(rec->imode) || S_ISLNK(rec->imode)) {
515 if (rec->found_csum_item && rec->nodatasum)
516 rec->errors |= I_ERR_ODD_CSUM_ITEM;
517 if (rec->some_csum_missing && !rec->nodatasum)
518 rec->errors |= I_ERR_SOME_CSUM_MISSING;
521 BUG_ON(rec->refs != 1);
522 if (can_free_inode_rec(rec)) {
523 cache = lookup_cache_extent(inode_cache, rec->ino, 1);
524 node = container_of(cache, struct ptr_node, cache);
525 BUG_ON(node->data != rec);
526 remove_cache_extent(inode_cache, &node->cache);
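/*
 * check_orphan_item() probes the root for an orphan item
 * (BTRFS_ORPHAN_OBJECTID / BTRFS_ORPHAN_ITEM_KEY, keyed by the inode
 * number). Callers use it to clear I_ERR_NO_ORPHAN_ITEM when an unlinked
 * inode still has its orphan item.
 */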
532 static int check_orphan_item(struct btrfs_root *root, u64 ino)
534 struct btrfs_path path;
535 struct btrfs_key key;
538 key.objectid = BTRFS_ORPHAN_OBJECTID;
539 key.type = BTRFS_ORPHAN_ITEM_KEY;
542 btrfs_init_path(&path);
543 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
544 btrfs_release_path(&path);
550 static int process_inode_item(struct extent_buffer *eb,
551 int slot, struct btrfs_key *key,
552 struct shared_node *active_node)
554 struct inode_record *rec;
555 struct btrfs_inode_item *item;
557 rec = active_node->current;
558 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
559 if (rec->found_inode_item) {
560 rec->errors |= I_ERR_DUP_INODE_ITEM;
563 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
564 rec->nlink = btrfs_inode_nlink(eb, item);
565 rec->isize = btrfs_inode_size(eb, item);
566 rec->nbytes = btrfs_inode_nbytes(eb, item);
567 rec->imode = btrfs_inode_mode(eb, item);
568 if (btrfs_inode_flags(eb, item) & BTRFS_INODE_NODATASUM)
570 rec->found_inode_item = 1;
572 rec->errors |= I_ERR_NO_ORPHAN_ITEM;
573 maybe_free_inode_rec(&active_node->inode_cache, rec);
577 static struct inode_backref *get_inode_backref(struct inode_record *rec,
579 int namelen, u64 dir)
581 struct inode_backref *backref;
583 list_for_each_entry(backref, &rec->backrefs, list) {
584 if (rec->ino == BTRFS_MULTIPLE_OBJECTIDS)
586 if (backref->dir != dir || backref->namelen != namelen)
588 if (memcmp(name, backref->name, namelen))
593 backref = malloc(sizeof(*backref) + namelen + 1);
594 memset(backref, 0, sizeof(*backref));
596 backref->namelen = namelen;
597 memcpy(backref->name, name, namelen);
598 backref->name[namelen] = '\0';
599 list_add_tail(&backref->list, &rec->backrefs);
603 static int add_inode_backref(struct cache_tree *inode_cache,
604 u64 ino, u64 dir, u64 index,
605 const char *name, int namelen,
606 int filetype, int itemtype, int errors)
608 struct inode_record *rec;
609 struct inode_backref *backref;
611 rec = get_inode_rec(inode_cache, ino, 1);
612 backref = get_inode_backref(rec, name, namelen, dir);
614 backref->errors |= errors;
615 if (itemtype == BTRFS_DIR_INDEX_KEY) {
616 if (backref->found_dir_index)
617 backref->errors |= REF_ERR_DUP_DIR_INDEX;
618 if (backref->found_inode_ref && backref->index != index)
619 backref->errors |= REF_ERR_INDEX_UNMATCH;
620 if (backref->found_dir_item && backref->filetype != filetype)
621 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
623 backref->index = index;
624 backref->filetype = filetype;
625 backref->found_dir_index = 1;
626 } else if (itemtype == BTRFS_DIR_ITEM_KEY) {
628 if (backref->found_dir_item)
629 backref->errors |= REF_ERR_DUP_DIR_ITEM;
630 if (backref->found_dir_index && backref->filetype != filetype)
631 backref->errors |= REF_ERR_FILETYPE_UNMATCH;
633 backref->filetype = filetype;
634 backref->found_dir_item = 1;
635 } else if ((itemtype == BTRFS_INODE_REF_KEY) ||
636 (itemtype == BTRFS_INODE_EXTREF_KEY)) {
637 if (backref->found_inode_ref)
638 backref->errors |= REF_ERR_DUP_INODE_REF;
639 if (backref->found_dir_index && backref->index != index)
640 backref->errors |= REF_ERR_INDEX_UNMATCH;
642 backref->index = index;
644 backref->ref_type = itemtype;
645 backref->found_inode_ref = 1;
650 maybe_free_inode_rec(inode_cache, rec);
654 static int merge_inode_recs(struct inode_record *src, struct inode_record *dst,
655 struct cache_tree *dst_cache)
657 struct inode_backref *backref;
661 list_for_each_entry(backref, &src->backrefs, list) {
662 if (backref->found_dir_index) {
663 add_inode_backref(dst_cache, dst->ino, backref->dir,
664 backref->index, backref->name,
665 backref->namelen, backref->filetype,
666 BTRFS_DIR_INDEX_KEY, backref->errors);
668 if (backref->found_dir_item) {
670 add_inode_backref(dst_cache, dst->ino,
671 backref->dir, 0, backref->name,
672 backref->namelen, backref->filetype,
673 BTRFS_DIR_ITEM_KEY, backref->errors);
675 if (backref->found_inode_ref) {
676 add_inode_backref(dst_cache, dst->ino,
677 backref->dir, backref->index,
678 backref->name, backref->namelen, 0,
679 backref->ref_type, backref->errors);
683 if (src->found_dir_item)
684 dst->found_dir_item = 1;
685 if (src->found_file_extent)
686 dst->found_file_extent = 1;
687 if (src->found_csum_item)
688 dst->found_csum_item = 1;
689 if (src->some_csum_missing)
690 dst->some_csum_missing = 1;
691 if (dst->first_extent_gap > src->first_extent_gap)
692 dst->first_extent_gap = src->first_extent_gap;
694 BUG_ON(src->found_link < dir_count);
695 dst->found_link += src->found_link - dir_count;
696 dst->found_size += src->found_size;
697 if (src->extent_start != (u64)-1) {
698 if (dst->extent_start == (u64)-1) {
699 dst->extent_start = src->extent_start;
700 dst->extent_end = src->extent_end;
702 if (dst->extent_end > src->extent_start)
703 dst->errors |= I_ERR_FILE_EXTENT_OVERLAP;
704 else if (dst->extent_end < src->extent_start &&
705 dst->extent_end < dst->first_extent_gap)
706 dst->first_extent_gap = dst->extent_end;
707 if (dst->extent_end < src->extent_end)
708 dst->extent_end = src->extent_end;
712 dst->errors |= src->errors;
713 if (src->found_inode_item) {
714 if (!dst->found_inode_item) {
715 dst->nlink = src->nlink;
716 dst->isize = src->isize;
717 dst->nbytes = src->nbytes;
718 dst->imode = src->imode;
719 dst->nodatasum = src->nodatasum;
720 dst->found_inode_item = 1;
722 dst->errors |= I_ERR_DUP_INODE_ITEM;
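/*
 * splice_shared_node() moves every cached record of a shared subtree node
 * (first its root_cache, then its inode_cache) into the destination node,
 * merging colliding inode records with merge_inode_recs(), and carries the
 * "current" inode forward so scanning can continue seamlessly.
 */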
730 static int splice_shared_node(struct shared_node *src_node,
731 struct shared_node *dst_node)
733 struct cache_extent *cache;
734 struct ptr_node *node, *ins;
735 struct cache_tree *src, *dst;
736 struct inode_record *rec, *conflict;
741 if (--src_node->refs == 0)
743 if (src_node->current)
744 current_ino = src_node->current->ino;
746 src = &src_node->root_cache;
747 dst = &dst_node->root_cache;
749 cache = search_cache_extent(src, 0);
751 node = container_of(cache, struct ptr_node, cache);
753 cache = next_cache_extent(cache);
756 remove_cache_extent(src, &node->cache);
759 ins = malloc(sizeof(*ins));
760 ins->cache.start = node->cache.start;
761 ins->cache.size = node->cache.size;
765 ret = insert_cache_extent(dst, &ins->cache);
766 if (ret == -EEXIST) {
767 conflict = get_inode_rec(dst, rec->ino, 1);
768 merge_inode_recs(rec, conflict, dst);
770 conflict->checked = 1;
771 if (dst_node->current == conflict)
772 dst_node->current = NULL;
774 maybe_free_inode_rec(dst, conflict);
782 if (src == &src_node->root_cache) {
783 src = &src_node->inode_cache;
784 dst = &dst_node->inode_cache;
788 if (current_ino > 0 && (!dst_node->current ||
789 current_ino > dst_node->current->ino)) {
790 if (dst_node->current) {
791 dst_node->current->checked = 1;
792 maybe_free_inode_rec(dst, dst_node->current);
794 dst_node->current = get_inode_rec(dst, current_ino, 1);
799 static void free_inode_ptr(struct cache_extent *cache)
801 struct ptr_node *node;
802 struct inode_record *rec;
804 node = container_of(cache, struct ptr_node, cache);
810 FREE_EXTENT_CACHE_BASED_TREE(inode_recs, free_inode_ptr);
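/*
 * Tree blocks referenced by more than one root are tracked as shared_nodes
 * in wc->shared, keyed by the block's bytenr, so that each shared subtree
 * is only walked once; enter_shared_node()/leave_shared_node() below park
 * and later splice back the records collected under such a block.
 */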
812 static struct shared_node *find_shared_node(struct cache_tree *shared,
815 struct cache_extent *cache;
816 struct shared_node *node;
818 cache = lookup_cache_extent(shared, bytenr, 1);
820 node = container_of(cache, struct shared_node, cache);
826 static int add_shared_node(struct cache_tree *shared, u64 bytenr, u32 refs)
829 struct shared_node *node;
831 node = calloc(1, sizeof(*node));
832 node->cache.start = bytenr;
833 node->cache.size = 1;
834 cache_tree_init(&node->root_cache);
835 cache_tree_init(&node->inode_cache);
838 ret = insert_cache_extent(shared, &node->cache);
843 static int enter_shared_node(struct btrfs_root *root, u64 bytenr, u32 refs,
844 struct walk_control *wc, int level)
846 struct shared_node *node;
847 struct shared_node *dest;
849 if (level == wc->active_node)
852 BUG_ON(wc->active_node <= level);
853 node = find_shared_node(&wc->shared, bytenr);
855 add_shared_node(&wc->shared, bytenr, refs);
856 node = find_shared_node(&wc->shared, bytenr);
857 wc->nodes[level] = node;
858 wc->active_node = level;
862 if (wc->root_level == wc->active_node &&
863 btrfs_root_refs(&root->root_item) == 0) {
864 if (--node->refs == 0) {
865 free_inode_recs_tree(&node->root_cache);
866 free_inode_recs_tree(&node->inode_cache);
867 remove_cache_extent(&wc->shared, &node->cache);
873 dest = wc->nodes[wc->active_node];
874 splice_shared_node(node, dest);
875 if (node->refs == 0) {
876 remove_cache_extent(&wc->shared, &node->cache);
882 static int leave_shared_node(struct btrfs_root *root,
883 struct walk_control *wc, int level)
885 struct shared_node *node;
886 struct shared_node *dest;
889 if (level == wc->root_level)
892 for (i = level + 1; i < BTRFS_MAX_LEVEL; i++) {
896 BUG_ON(i >= BTRFS_MAX_LEVEL);
898 node = wc->nodes[wc->active_node];
899 wc->nodes[wc->active_node] = NULL;
902 dest = wc->nodes[wc->active_node];
903 if (wc->active_node < wc->root_level ||
904 btrfs_root_refs(&root->root_item) > 0) {
905 BUG_ON(node->refs <= 1);
906 splice_shared_node(node, dest);
908 BUG_ON(node->refs < 2);
917 * 1 - if the root with id child_root_id is a child of root parent_root_id
918 * 0 - if the root child_root_id isn't a child of the root parent_root_id but
919 * has other root(s) as parent(s)
920 * 2 - if the root child_root_id doesn't have any parent roots
922 static int is_child_root(struct btrfs_root *root, u64 parent_root_id,
925 struct btrfs_path path;
926 struct btrfs_key key;
927 struct extent_buffer *leaf;
931 btrfs_init_path(&path);
933 key.objectid = parent_root_id;
934 key.type = BTRFS_ROOT_REF_KEY;
935 key.offset = child_root_id;
936 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
940 btrfs_release_path(&path);
944 key.objectid = child_root_id;
945 key.type = BTRFS_ROOT_BACKREF_KEY;
947 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path,
953 leaf = path.nodes[0];
954 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
955 ret = btrfs_next_leaf(root->fs_info->tree_root, &path);
958 leaf = path.nodes[0];
961 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
962 if (key.objectid != child_root_id ||
963 key.type != BTRFS_ROOT_BACKREF_KEY)
968 if (key.offset == parent_root_id) {
969 btrfs_release_path(&path);
976 btrfs_release_path(&path);
979 return has_parent ? 0 : 2;
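/*
 * process_dir_item() walks all packed btrfs_dir_item entries in one leaf
 * slot: each name is counted into the directory's found_size and recorded
 * as an inode backref (location of type INODE_ITEM_KEY) or a root backref
 * (ROOT_ITEM_KEY); any other location type is flagged as invalid.
 */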
982 static int process_dir_item(struct btrfs_root *root,
983 struct extent_buffer *eb,
984 int slot, struct btrfs_key *key,
985 struct shared_node *active_node)
995 struct btrfs_dir_item *di;
996 struct inode_record *rec;
997 struct cache_tree *root_cache;
998 struct cache_tree *inode_cache;
999 struct btrfs_key location;
1000 char namebuf[BTRFS_NAME_LEN];
1002 root_cache = &active_node->root_cache;
1003 inode_cache = &active_node->inode_cache;
1004 rec = active_node->current;
1005 rec->found_dir_item = 1;
1007 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1008 total = btrfs_item_size_nr(eb, slot);
1009 while (cur < total) {
1011 btrfs_dir_item_key_to_cpu(eb, di, &location);
1012 name_len = btrfs_dir_name_len(eb, di);
1013 data_len = btrfs_dir_data_len(eb, di);
1014 filetype = btrfs_dir_type(eb, di);
1016 rec->found_size += name_len;
1017 if (name_len <= BTRFS_NAME_LEN) {
1021 len = BTRFS_NAME_LEN;
1022 error = REF_ERR_NAME_TOO_LONG;
1024 read_extent_buffer(eb, namebuf, (unsigned long)(di + 1), len);
1026 if (location.type == BTRFS_INODE_ITEM_KEY) {
1027 add_inode_backref(inode_cache, location.objectid,
1028 key->objectid, key->offset, namebuf,
1029 len, filetype, key->type, error);
1030 } else if (location.type == BTRFS_ROOT_ITEM_KEY) {
1031 add_inode_backref(root_cache, location.objectid,
1032 key->objectid, key->offset,
1033 namebuf, len, filetype,
1036 fprintf(stderr, "invalid location in dir item %u\n",
1038 add_inode_backref(inode_cache, BTRFS_MULTIPLE_OBJECTIDS,
1039 key->objectid, key->offset, namebuf,
1040 len, filetype, key->type, error);
1043 len = sizeof(*di) + name_len + data_len;
1044 di = (struct btrfs_dir_item *)((char *)di + len);
1047 if (key->type == BTRFS_DIR_INDEX_KEY && nritems > 1)
1048 rec->errors |= I_ERR_DUP_DIR_INDEX;
1053 static int process_inode_ref(struct extent_buffer *eb,
1054 int slot, struct btrfs_key *key,
1055 struct shared_node *active_node)
1063 struct cache_tree *inode_cache;
1064 struct btrfs_inode_ref *ref;
1065 char namebuf[BTRFS_NAME_LEN];
1067 inode_cache = &active_node->inode_cache;
1069 ref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1070 total = btrfs_item_size_nr(eb, slot);
1071 while (cur < total) {
1072 name_len = btrfs_inode_ref_name_len(eb, ref);
1073 index = btrfs_inode_ref_index(eb, ref);
1074 if (name_len <= BTRFS_NAME_LEN) {
1078 len = BTRFS_NAME_LEN;
1079 error = REF_ERR_NAME_TOO_LONG;
1081 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
1082 add_inode_backref(inode_cache, key->objectid, key->offset,
1083 index, namebuf, len, 0, key->type, error);
1085 len = sizeof(*ref) + name_len;
1086 ref = (struct btrfs_inode_ref *)((char *)ref + len);
1092 static int process_inode_extref(struct extent_buffer *eb,
1093 int slot, struct btrfs_key *key,
1094 struct shared_node *active_node)
1103 struct cache_tree *inode_cache;
1104 struct btrfs_inode_extref *extref;
1105 char namebuf[BTRFS_NAME_LEN];
1107 inode_cache = &active_node->inode_cache;
1109 extref = btrfs_item_ptr(eb, slot, struct btrfs_inode_extref);
1110 total = btrfs_item_size_nr(eb, slot);
1111 while (cur < total) {
1112 name_len = btrfs_inode_extref_name_len(eb, extref);
1113 index = btrfs_inode_extref_index(eb, extref);
1114 parent = btrfs_inode_extref_parent(eb, extref);
1115 if (name_len <= BTRFS_NAME_LEN) {
1119 len = BTRFS_NAME_LEN;
1120 error = REF_ERR_NAME_TOO_LONG;
1122 read_extent_buffer(eb, namebuf,
1123 (unsigned long)(extref + 1), len);
1124 add_inode_backref(inode_cache, key->objectid, parent,
1125 index, namebuf, len, 0, key->type, error);
1127 len = sizeof(*extref) + name_len;
1128 extref = (struct btrfs_inode_extref *)((char *)extref + len);
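/*
 * count_csum_range() searches the csum tree for EXTENT_CSUM items that
 * overlap [start, start + len) and returns, via *found, how many of those
 * bytes are actually covered by checksums. process_file_extent() uses the
 * result to set found_csum_item and some_csum_missing.
 */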
1135 static int count_csum_range(struct btrfs_root *root, u64 start,
1136 u64 len, u64 *found)
1138 struct btrfs_key key;
1139 struct btrfs_path path;
1140 struct extent_buffer *leaf;
1145 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
1147 btrfs_init_path(&path);
1149 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1151 key.type = BTRFS_EXTENT_CSUM_KEY;
1153 ret = btrfs_search_slot(NULL, root->fs_info->csum_root,
1157 if (ret > 0 && path.slots[0] > 0) {
1158 leaf = path.nodes[0];
1159 btrfs_item_key_to_cpu(leaf, &key, path.slots[0] - 1);
1160 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
1161 key.type == BTRFS_EXTENT_CSUM_KEY)
1166 leaf = path.nodes[0];
1167 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1168 ret = btrfs_next_leaf(root->fs_info->csum_root, &path);
1173 leaf = path.nodes[0];
1176 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1177 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
1178 key.type != BTRFS_EXTENT_CSUM_KEY)
1181 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1182 if (key.offset >= start + len)
1185 if (key.offset > start)
1188 size = btrfs_item_size_nr(leaf, path.slots[0]);
1189 csum_end = key.offset + (size / csum_size) * root->sectorsize;
1190 if (csum_end > start) {
1191 size = min(csum_end - start, len);
1200 btrfs_release_path(&path);
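/*
 * process_file_extent() records one EXTENT_DATA item into the inode record:
 * it tracks extent_start/extent_end to detect overlaps and gaps, validates
 * inline, regular and prealloc extents, and cross-checks the data checksum
 * coverage via count_csum_range().
 */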
1206 static int process_file_extent(struct btrfs_root *root,
1207 struct extent_buffer *eb,
1208 int slot, struct btrfs_key *key,
1209 struct shared_node *active_node)
1211 struct inode_record *rec;
1212 struct btrfs_file_extent_item *fi;
1214 u64 disk_bytenr = 0;
1215 u64 extent_offset = 0;
1216 u64 mask = root->sectorsize - 1;
1220 rec = active_node->current;
1221 BUG_ON(rec->ino != key->objectid || rec->refs > 1);
1222 rec->found_file_extent = 1;
1224 if (rec->extent_start == (u64)-1) {
1225 rec->extent_start = key->offset;
1226 rec->extent_end = key->offset;
1229 if (rec->extent_end > key->offset)
1230 rec->errors |= I_ERR_FILE_EXTENT_OVERLAP;
1231 else if (rec->extent_end < key->offset &&
1232 rec->extent_end < rec->first_extent_gap)
1233 rec->first_extent_gap = rec->extent_end;
1235 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
1236 extent_type = btrfs_file_extent_type(eb, fi);
1238 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1239 num_bytes = btrfs_file_extent_inline_len(eb, slot, fi);
1241 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1242 rec->found_size += num_bytes;
1243 num_bytes = (num_bytes + mask) & ~mask;
1244 } else if (extent_type == BTRFS_FILE_EXTENT_REG ||
1245 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1246 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1247 disk_bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1248 extent_offset = btrfs_file_extent_offset(eb, fi);
1249 if (num_bytes == 0 || (num_bytes & mask))
1250 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1251 if (num_bytes + extent_offset >
1252 btrfs_file_extent_ram_bytes(eb, fi))
1253 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1254 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC &&
1255 (btrfs_file_extent_compression(eb, fi) ||
1256 btrfs_file_extent_encryption(eb, fi) ||
1257 btrfs_file_extent_other_encoding(eb, fi)))
1258 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1259 if (disk_bytenr > 0)
1260 rec->found_size += num_bytes;
1262 rec->errors |= I_ERR_BAD_FILE_EXTENT;
1264 rec->extent_end = key->offset + num_bytes;
1266 if (disk_bytenr > 0) {
1268 if (btrfs_file_extent_compression(eb, fi))
1269 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1271 disk_bytenr += extent_offset;
1273 ret = count_csum_range(root, disk_bytenr, num_bytes, &found);
1276 if (extent_type == BTRFS_FILE_EXTENT_REG) {
1278 rec->found_csum_item = 1;
1279 if (found < num_bytes)
1280 rec->some_csum_missing = 1;
1281 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1283 rec->errors |= I_ERR_ODD_CSUM_ITEM;
1289 static int process_one_leaf(struct btrfs_root *root, struct extent_buffer *eb,
1290 struct walk_control *wc)
1292 struct btrfs_key key;
1296 struct cache_tree *inode_cache;
1297 struct shared_node *active_node;
1299 if (wc->root_level == wc->active_node &&
1300 btrfs_root_refs(&root->root_item) == 0)
1303 active_node = wc->nodes[wc->active_node];
1304 inode_cache = &active_node->inode_cache;
1305 nritems = btrfs_header_nritems(eb);
1306 for (i = 0; i < nritems; i++) {
1307 btrfs_item_key_to_cpu(eb, &key, i);
1309 if (key.objectid == BTRFS_FREE_SPACE_OBJECTID)
1311 if (key.type == BTRFS_ORPHAN_ITEM_KEY)
1314 if (active_node->current == NULL ||
1315 active_node->current->ino < key.objectid) {
1316 if (active_node->current) {
1317 active_node->current->checked = 1;
1318 maybe_free_inode_rec(inode_cache,
1319 active_node->current);
1321 active_node->current = get_inode_rec(inode_cache,
1325 case BTRFS_DIR_ITEM_KEY:
1326 case BTRFS_DIR_INDEX_KEY:
1327 ret = process_dir_item(root, eb, i, &key, active_node);
1329 case BTRFS_INODE_REF_KEY:
1330 ret = process_inode_ref(eb, i, &key, active_node);
1332 case BTRFS_INODE_EXTREF_KEY:
1333 ret = process_inode_extref(eb, i, &key, active_node);
1335 case BTRFS_INODE_ITEM_KEY:
1336 ret = process_inode_item(eb, i, &key, active_node);
1338 case BTRFS_EXTENT_DATA_KEY:
1339 ret = process_file_extent(root, eb, i, &key,
1349 static void reada_walk_down(struct btrfs_root *root,
1350 struct extent_buffer *node, int slot)
1359 level = btrfs_header_level(node);
1363 nritems = btrfs_header_nritems(node);
1364 blocksize = btrfs_level_size(root, level - 1);
1365 for (i = slot; i < nritems; i++) {
1366 bytenr = btrfs_node_blockptr(node, i);
1367 ptr_gen = btrfs_node_ptr_generation(node, i);
1368 readahead_tree_block(root, bytenr, blocksize, ptr_gen);
1373 * Check the child node/leaf by the following conditions:
1374 * 1. the first item key of the node/leaf should be the same as the key in the parent
1376 * 2. the block pointer in the parent node should match the child node/leaf.
1377 * 3. the generation in the parent node and in the child's header should be consistent.
1379 * Otherwise the child node/leaf pointed to by the key in the parent is not valid.
1381 * We would like to check the leaf owner too, but since subvolumes may share
1382 * leaves, the owner check is not very strong; the key check should be
1383 * sufficient for that case.
1385 static int check_child_node(struct btrfs_root *root,
1386 struct extent_buffer *parent, int slot,
1387 struct extent_buffer *child)
1389 struct btrfs_key parent_key;
1390 struct btrfs_key child_key;
1393 btrfs_node_key_to_cpu(parent, &parent_key, slot);
1394 if (btrfs_header_level(child) == 0)
1395 btrfs_item_key_to_cpu(child, &child_key, 0);
1397 btrfs_node_key_to_cpu(child, &child_key, 0);
1399 if (memcmp(&parent_key, &child_key, sizeof(parent_key))) {
1402 "Wrong key of child node/leaf, wanted: (%llu, %u, %llu), have: (%llu, %u, %llu)\n",
1403 parent_key.objectid, parent_key.type, parent_key.offset,
1404 child_key.objectid, child_key.type, child_key.offset);
1406 if (btrfs_header_bytenr(child) != btrfs_node_blockptr(parent, slot)) {
1408 fprintf(stderr, "Wrong block of child node/leaf, wanted: %llu, have: %llu\n",
1409 btrfs_node_blockptr(parent, slot),
1410 btrfs_header_bytenr(child));
1412 if (btrfs_node_ptr_generation(parent, slot) !=
1413 btrfs_header_generation(child)) {
1415 fprintf(stderr, "Wrong generation of child node/leaf, wanted: %llu, have: %llu\n",
1416 btrfs_header_generation(child),
1417 btrfs_node_ptr_generation(parent, slot));
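/*
 * walk_down_tree() performs an iterative depth-first descent: for each
 * child block it looks up the extent refcount, hands shared blocks to
 * enter_shared_node(), reads and sanity-checks the block (check_child_node,
 * btrfs_check_node/btrfs_check_leaf) and finally processes leaves with
 * process_one_leaf(). walk_up_tree() advances to the next sibling.
 */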
1422 static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
1423 struct walk_control *wc, int *level)
1425 enum btrfs_tree_block_status status;
1428 struct extent_buffer *next;
1429 struct extent_buffer *cur;
1434 WARN_ON(*level < 0);
1435 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1436 ret = btrfs_lookup_extent_info(NULL, root,
1437 path->nodes[*level]->start,
1438 *level, 1, &refs, NULL);
1445 ret = enter_shared_node(root, path->nodes[*level]->start,
1453 while (*level >= 0) {
1454 WARN_ON(*level < 0);
1455 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1456 cur = path->nodes[*level];
1458 if (btrfs_header_level(cur) != *level)
1461 if (path->slots[*level] >= btrfs_header_nritems(cur))
1464 ret = process_one_leaf(root, cur, wc);
1469 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1470 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
1471 blocksize = btrfs_level_size(root, *level - 1);
1472 ret = btrfs_lookup_extent_info(NULL, root, bytenr, *level - 1,
1478 ret = enter_shared_node(root, bytenr, refs,
1481 path->slots[*level]++;
1486 next = btrfs_find_tree_block(root, bytenr, blocksize);
1487 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
1488 free_extent_buffer(next);
1489 reada_walk_down(root, cur, path->slots[*level]);
1490 next = read_tree_block(root, bytenr, blocksize,
1493 struct btrfs_key node_key;
1495 btrfs_node_key_to_cpu(path->nodes[*level],
1497 path->slots[*level]);
1498 btrfs_add_corrupt_extent_record(root->fs_info,
1500 path->nodes[*level]->start,
1501 root->leafsize, *level);
1507 ret = check_child_node(root, cur, path->slots[*level], next);
1513 if (btrfs_is_leaf(next))
1514 status = btrfs_check_leaf(root, NULL, next);
1516 status = btrfs_check_node(root, NULL, next);
1517 if (status != BTRFS_TREE_BLOCK_CLEAN) {
1518 free_extent_buffer(next);
1523 *level = *level - 1;
1524 free_extent_buffer(path->nodes[*level]);
1525 path->nodes[*level] = next;
1526 path->slots[*level] = 0;
1529 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
1533 static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
1534 struct walk_control *wc, int *level)
1537 struct extent_buffer *leaf;
1539 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1540 leaf = path->nodes[i];
1541 if (path->slots[i] + 1 < btrfs_header_nritems(leaf)) {
1546 free_extent_buffer(path->nodes[*level]);
1547 path->nodes[*level] = NULL;
1548 BUG_ON(*level > wc->active_node);
1549 if (*level == wc->active_node)
1550 leave_shared_node(root, wc, *level);
1557 static int check_root_dir(struct inode_record *rec)
1559 struct inode_backref *backref;
1562 if (!rec->found_inode_item || rec->errors)
1564 if (rec->nlink != 1 || rec->found_link != 0)
1566 if (list_empty(&rec->backrefs))
1568 backref = list_entry(rec->backrefs.next, struct inode_backref, list);
1569 if (!backref->found_inode_ref)
1571 if (backref->index != 0 || backref->namelen != 2 ||
1572 memcmp(backref->name, "..", 2))
1574 if (backref->found_dir_index || backref->found_dir_item)
1581 static int repair_inode_isize(struct btrfs_trans_handle *trans,
1582 struct btrfs_root *root, struct btrfs_path *path,
1583 struct inode_record *rec)
1585 struct btrfs_inode_item *ei;
1586 struct btrfs_key key;
1589 key.objectid = rec->ino;
1590 key.type = BTRFS_INODE_ITEM_KEY;
1591 key.offset = (u64)-1;
1593 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1597 if (!path->slots[0]) {
1604 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1605 if (key.objectid != rec->ino) {
1610 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1611 struct btrfs_inode_item);
1612 btrfs_set_inode_size(path->nodes[0], ei, rec->found_size);
1613 btrfs_mark_buffer_dirty(path->nodes[0]);
1614 rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
1615 printf("reset isize for dir %Lu root %Lu\n", rec->ino,
1616 root->root_key.objectid);
1618 btrfs_release_path(path);
1622 static int repair_inode_orphan_item(struct btrfs_trans_handle *trans,
1623 struct btrfs_root *root,
1624 struct btrfs_path *path,
1625 struct inode_record *rec)
1629 ret = btrfs_add_orphan_item(trans, root, path, rec->ino);
1630 btrfs_release_path(path);
1632 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
1636 static int add_missing_dir_index(struct btrfs_root *root,
1637 struct cache_tree *inode_cache,
1638 struct inode_record *rec,
1639 struct inode_backref *backref)
1641 struct btrfs_path *path;
1642 struct btrfs_trans_handle *trans;
1643 struct btrfs_dir_item *dir_item;
1644 struct extent_buffer *leaf;
1645 struct btrfs_key key;
1646 struct btrfs_disk_key disk_key;
1647 struct inode_record *dir_rec;
1648 unsigned long name_ptr;
1649 u32 data_size = sizeof(*dir_item) + backref->namelen;
1652 path = btrfs_alloc_path();
1656 trans = btrfs_start_transaction(root, 1);
1657 if (IS_ERR(trans)) {
1658 btrfs_free_path(path);
1659 return PTR_ERR(trans);
1662 fprintf(stderr, "repairing missing dir index item for inode %llu\n",
1663 (unsigned long long)rec->ino);
1664 key.objectid = backref->dir;
1665 key.type = BTRFS_DIR_INDEX_KEY;
1666 key.offset = backref->index;
1668 ret = btrfs_insert_empty_item(trans, root, path, &key, data_size);
1671 leaf = path->nodes[0];
1672 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
1674 disk_key.objectid = cpu_to_le64(rec->ino);
1675 disk_key.type = BTRFS_INODE_ITEM_KEY;
1676 disk_key.offset = 0;
1678 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
1679 btrfs_set_dir_type(leaf, dir_item, imode_to_type(rec->imode));
1680 btrfs_set_dir_data_len(leaf, dir_item, 0);
1681 btrfs_set_dir_name_len(leaf, dir_item, backref->namelen);
1682 name_ptr = (unsigned long)(dir_item + 1);
1683 write_extent_buffer(leaf, backref->name, name_ptr, backref->namelen);
1684 btrfs_mark_buffer_dirty(leaf);
1685 btrfs_free_path(path);
1686 btrfs_commit_transaction(trans, root);
1688 backref->found_dir_index = 1;
1689 dir_rec = get_inode_rec(inode_cache, backref->dir, 0);
1692 dir_rec->found_size += backref->namelen;
1693 if (dir_rec->found_size == dir_rec->isize &&
1694 (dir_rec->errors & I_ERR_DIR_ISIZE_WRONG))
1695 dir_rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
1696 if (dir_rec->found_size != dir_rec->isize)
1697 dir_rec->errors |= I_ERR_DIR_ISIZE_WRONG;
1702 static int delete_dir_index(struct btrfs_root *root,
1703 struct cache_tree *inode_cache,
1704 struct inode_record *rec,
1705 struct inode_backref *backref)
1707 struct btrfs_trans_handle *trans;
1708 struct btrfs_dir_item *di;
1709 struct btrfs_path *path;
1712 path = btrfs_alloc_path();
1716 trans = btrfs_start_transaction(root, 1);
1717 if (IS_ERR(trans)) {
1718 btrfs_free_path(path);
1719 return PTR_ERR(trans);
1723 fprintf(stderr, "Deleting bad dir index [%llu,%u,%llu] root %llu\n",
1724 (unsigned long long)backref->dir,
1725 BTRFS_DIR_INDEX_KEY, (unsigned long long)backref->index,
1726 (unsigned long long)root->objectid);
1728 di = btrfs_lookup_dir_index(trans, root, path, backref->dir,
1729 backref->name, backref->namelen,
1730 backref->index, -1);
1733 btrfs_free_path(path);
1734 btrfs_commit_transaction(trans, root);
1741 ret = btrfs_del_item(trans, root, path);
1743 ret = btrfs_delete_one_dir_name(trans, root, path, di);
1745 btrfs_free_path(path);
1746 btrfs_commit_transaction(trans, root);
1750 static int create_inode_item(struct btrfs_root *root,
1751 struct inode_record *rec,
1752 struct inode_backref *backref, int root_dir)
1754 struct btrfs_trans_handle *trans;
1755 struct btrfs_inode_item inode_item;
1756 time_t now = time(NULL);
1759 trans = btrfs_start_transaction(root, 1);
1760 if (IS_ERR(trans)) {
1761 ret = PTR_ERR(trans);
1765 fprintf(stderr, "root %llu inode %llu recreating inode item, this may "
1766 "be incomplete, please check permissions and content after "
1767 "the fsck completes.\n", (unsigned long long)root->objectid,
1768 (unsigned long long)rec->ino);
1770 memset(&inode_item, 0, sizeof(inode_item));
1771 btrfs_set_stack_inode_generation(&inode_item, trans->transid);
1773 btrfs_set_stack_inode_nlink(&inode_item, 1);
1775 btrfs_set_stack_inode_nlink(&inode_item, rec->found_link);
1776 btrfs_set_stack_inode_nbytes(&inode_item, rec->found_size);
1777 if (rec->found_dir_item) {
1778 if (rec->found_file_extent)
1779 fprintf(stderr, "root %llu inode %llu has both a dir "
1780 "item and extents, unsure if it is a dir or a "
1781 "regular file so setting it as a directory\n",
1782 (unsigned long long)root->objectid,
1783 (unsigned long long)rec->ino);
1784 btrfs_set_stack_inode_mode(&inode_item, S_IFDIR | 0755);
1785 btrfs_set_stack_inode_size(&inode_item, rec->found_size);
1786 } else if (!rec->found_dir_item) {
1787 btrfs_set_stack_inode_size(&inode_item, rec->extent_end);
1788 btrfs_set_stack_inode_mode(&inode_item, S_IFREG | 0755);
1790 btrfs_set_stack_timespec_sec(&inode_item.atime, now);
1791 btrfs_set_stack_timespec_nsec(&inode_item.atime, 0);
1792 btrfs_set_stack_timespec_sec(&inode_item.ctime, now);
1793 btrfs_set_stack_timespec_nsec(&inode_item.ctime, 0);
1794 btrfs_set_stack_timespec_sec(&inode_item.mtime, now);
1795 btrfs_set_stack_timespec_nsec(&inode_item.mtime, 0);
1796 btrfs_set_stack_timespec_sec(&inode_item.otime, 0);
1797 btrfs_set_stack_timespec_nsec(&inode_item.otime, 0);
1799 ret = btrfs_insert_inode(trans, root, rec->ino, &inode_item);
1801 btrfs_commit_transaction(trans, root);
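/*
 * repair_inode_backrefs() fixes per-backref inconsistencies before the main
 * inode checks: it deletes bad dir indexes, re-adds missing ones, inserts
 * missing dir index/item pairs and recreates missing inode items. It
 * returns the number of repairs made (or a negative error) so the caller
 * knows whether a rescan is needed.
 */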
1805 static int repair_inode_backrefs(struct btrfs_root *root,
1806 struct inode_record *rec,
1807 struct cache_tree *inode_cache,
1810 struct inode_backref *tmp, *backref;
1811 u64 root_dirid = btrfs_root_dirid(&root->root_item);
1815 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
1816 if (!delete && rec->ino == root_dirid) {
1817 if (!rec->found_inode_item) {
1818 ret = create_inode_item(root, rec, backref, 1);
1825 /* Index 0 for root dirs is special, don't mess with it */
1826 if (rec->ino == root_dirid && backref->index == 0)
1830 ((backref->found_dir_index && !backref->found_inode_ref) ||
1831 (backref->found_dir_index && backref->found_inode_ref &&
1832 (backref->errors & REF_ERR_INDEX_UNMATCH)))) {
1833 ret = delete_dir_index(root, inode_cache, rec, backref);
1837 list_del(&backref->list);
1841 if (!delete && !backref->found_dir_index &&
1842 backref->found_dir_item && backref->found_inode_ref) {
1843 ret = add_missing_dir_index(root, inode_cache, rec,
1848 if (backref->found_dir_item &&
1849 backref->found_dir_index) {
1851 if (!backref->errors &&
1852 backref->found_inode_ref) {
1853 list_del(&backref->list);
1859 if (!delete && (!backref->found_dir_index &&
1860 !backref->found_dir_item &&
1861 backref->found_inode_ref)) {
1862 struct btrfs_trans_handle *trans;
1863 struct btrfs_key location;
1865 ret = check_dir_conflict(root, backref->name,
1871 * let the nlink fixing routine handle it,
1872 * which can do it better.
1877 location.objectid = rec->ino;
1878 location.type = BTRFS_INODE_ITEM_KEY;
1879 location.offset = 0;
1881 trans = btrfs_start_transaction(root, 1);
1882 if (IS_ERR(trans)) {
1883 ret = PTR_ERR(trans);
1886 fprintf(stderr, "adding missing dir index/item pair "
1888 (unsigned long long)rec->ino);
1889 ret = btrfs_insert_dir_item(trans, root, backref->name,
1891 backref->dir, &location,
1892 imode_to_type(rec->imode),
1895 btrfs_commit_transaction(trans, root);
1899 if (!delete && (backref->found_inode_ref &&
1900 backref->found_dir_index &&
1901 backref->found_dir_item &&
1902 !(backref->errors & REF_ERR_INDEX_UNMATCH) &&
1903 !rec->found_inode_item)) {
1904 ret = create_inode_item(root, rec, backref, 0);
1911 return ret ? ret : repaired;
1915 * To determine the file type for nlink/inode_item repair
1917 * Return 0 if file type is found and BTRFS_FT_* is stored into type.
1918 * Return -ENOENT if file type is not found.
1920 static int find_file_type(struct inode_record *rec, u8 *type)
1922 struct inode_backref *backref;
1924 /* For the case where the inode item has been recovered */
1925 if (rec->found_inode_item) {
1926 *type = imode_to_type(rec->imode);
1930 list_for_each_entry(backref, &rec->backrefs, list) {
1931 if (backref->found_dir_index || backref->found_dir_item) {
1932 *type = backref->filetype;
1940 * To determine the file name for nlink repair
1942 * Return 0 if file name is found, set name and namelen.
1943 * Return -ENOENT if file name is not found.
1945 static int find_file_name(struct inode_record *rec,
1946 char *name, int *namelen)
1948 struct inode_backref *backref;
1950 list_for_each_entry(backref, &rec->backrefs, list) {
1951 if (backref->found_dir_index || backref->found_dir_item ||
1952 backref->found_inode_ref) {
1953 memcpy(name, backref->name, backref->namelen);
1954 *namelen = backref->namelen;
1961 /* Reset the nlink of the inode to the correct one */
1962 static int reset_nlink(struct btrfs_trans_handle *trans,
1963 struct btrfs_root *root,
1964 struct btrfs_path *path,
1965 struct inode_record *rec)
1967 struct inode_backref *backref;
1968 struct inode_backref *tmp;
1969 struct btrfs_key key;
1970 struct btrfs_inode_item *inode_item;
1973 /* We don't trust this value either; reset it and iterate the backrefs */
1974 rec->found_link = 0;
1976 /* Remove all backrefs, including the valid ones */
1977 list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
1978 ret = btrfs_unlink(trans, root, rec->ino, backref->dir,
1979 backref->index, backref->name,
1980 backref->namelen, 0);
1984 /* remove an invalid backref, so it won't be added back */
1985 if (!(backref->found_dir_index &&
1986 backref->found_dir_item &&
1987 backref->found_inode_ref)) {
1988 list_del(&backref->list);
1995 /* Set nlink to 0 */
1996 key.objectid = rec->ino;
1997 key.type = BTRFS_INODE_ITEM_KEY;
1999 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2006 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2007 struct btrfs_inode_item);
2008 btrfs_set_inode_nlink(path->nodes[0], inode_item, 0);
2009 btrfs_mark_buffer_dirty(path->nodes[0]);
2010 btrfs_release_path(path);
2013 * Add back the valid inode_ref/dir_item/dir_index items;
2014 * add_link() will handle the nlink increment, so the resulting nlink will be correct
2016 list_for_each_entry(backref, &rec->backrefs, list) {
2017 ret = btrfs_add_link(trans, root, rec->ino, backref->dir,
2018 backref->name, backref->namelen,
2019 backref->ref_type, &backref->index, 1);
2024 btrfs_release_path(path);
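/*
 * repair_inode_nlinks() recovers a sane link count: it saves the file name
 * and type, resets nlink and the backrefs via reset_nlink(), and if no
 * valid backref remains it links the inode into a "lost+found" directory,
 * appending ".<ino>" to the name on collisions.
 */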
2028 static int repair_inode_nlinks(struct btrfs_trans_handle *trans,
2029 struct btrfs_root *root,
2030 struct btrfs_path *path,
2031 struct inode_record *rec)
2033 char *dir_name = "lost+found";
2034 char namebuf[BTRFS_NAME_LEN] = {0};
2039 int name_recovered = 0;
2040 int type_recovered = 0;
2044 * Get the file name and type first, before the invalid inode refs
2045 * are deleted by remove_all_invalid_backref()
2047 name_recovered = !find_file_name(rec, namebuf, &namelen);
2048 type_recovered = !find_file_type(rec, &type);
2050 if (!name_recovered) {
2051 printf("Can't get file name for inode %llu, using '%llu' as fallback\n",
2052 rec->ino, rec->ino);
2053 namelen = count_digits(rec->ino);
2054 sprintf(namebuf, "%llu", rec->ino);
2057 if (!type_recovered) {
2058 printf("Can't get file type for inode %llu, using FILE as fallback\n",
2060 type = BTRFS_FT_REG_FILE;
2064 ret = reset_nlink(trans, root, path, rec);
2067 "Failed to reset nlink for inode %llu: %s\n",
2068 rec->ino, strerror(-ret));
2072 if (rec->found_link == 0) {
2073 lost_found_ino = root->highest_inode;
2074 if (lost_found_ino >= BTRFS_LAST_FREE_OBJECTID) {
2079 ret = btrfs_mkdir(trans, root, dir_name, strlen(dir_name),
2080 BTRFS_FIRST_FREE_OBJECTID, &lost_found_ino,
2083 fprintf(stderr, "Failed to create '%s' dir: %s",
2084 dir_name, strerror(-ret));
2087 ret = btrfs_add_link(trans, root, rec->ino, lost_found_ino,
2088 namebuf, namelen, type, NULL, 1);
2089 if (ret == -EEXIST) {
2091 * Conflicting file name, add ".INO" as suffix (+1 for the '.')
2093 if (namelen + count_digits(rec->ino) + 1 >
2098 snprintf(namebuf + namelen, BTRFS_NAME_LEN - namelen,
2100 namelen += count_digits(rec->ino) + 1;
2101 ret = btrfs_add_link(trans, root, rec->ino,
2102 lost_found_ino, namebuf,
2103 namelen, type, NULL, 1);
2107 "Failed to link the inode %llu to %s dir: %s",
2108 rec->ino, dir_name, strerror(-ret));
2112 * Just increase the found_link, don't actually add the
2113 * backref. This will make things easier and this inode
2114 * record will be freed after the repair is done.
2115 * So fsck will not report a problem about this inode.
2118 printf("Moving file '%.*s' to '%s' dir since it has no valid backref\n",
2119 namelen, namebuf, dir_name);
2121 rec->errors &= ~I_ERR_LINK_COUNT_WRONG;
2122 printf("Fixed the nlink of inode %llu\n", rec->ino);
2124 btrfs_release_path(path);
2129 * Check if there is any normal (reg or prealloc) file extent for the given
2131 * inode. This is used to determine the file type when neither its
2132 * dir_index/item nor its inode_item exists.
2134 * This will *NOT* report errors; if any error happens, just consider the
2135 * inode not to have any normal file extent.
2137 static int find_normal_file_extent(struct btrfs_root *root, u64 ino)
2139 struct btrfs_path *path;
2140 struct btrfs_key key;
2141 struct btrfs_key found_key;
2142 struct btrfs_file_extent_item *fi;
2146 path = btrfs_alloc_path();
2150 key.type = BTRFS_EXTENT_DATA_KEY;
2153 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2158 if (ret && path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2159 ret = btrfs_next_leaf(root, path);
2166 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2168 if (found_key.objectid != ino ||
2169 found_key.type != BTRFS_EXTENT_DATA_KEY)
2171 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
2172 struct btrfs_file_extent_item);
2173 type = btrfs_file_extent_type(path->nodes[0], fi);
2174 if (type != BTRFS_FILE_EXTENT_INLINE) {
2180 btrfs_free_path(path);
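/*
 * btrfs_type_to_imode() is the inverse of imode_to_type(): it maps a
 * BTRFS_FT_* directory-entry type back to the corresponding S_IF* mode
 * bits, used when a missing inode item has to be recreated.
 */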
2184 static u32 btrfs_type_to_imode(u8 type)
2186 static u32 imode_by_btrfs_type[] = {
2187 [BTRFS_FT_REG_FILE] = S_IFREG,
2188 [BTRFS_FT_DIR] = S_IFDIR,
2189 [BTRFS_FT_CHRDEV] = S_IFCHR,
2190 [BTRFS_FT_BLKDEV] = S_IFBLK,
2191 [BTRFS_FT_FIFO] = S_IFIFO,
2192 [BTRFS_FT_SOCK] = S_IFSOCK,
2193 [BTRFS_FT_SYMLINK] = S_IFLNK,
2196 return imode_by_btrfs_type[(type)];
2199 static int repair_inode_no_item(struct btrfs_trans_handle *trans,
2200 struct btrfs_root *root,
2201 struct btrfs_path *path,
2202 struct inode_record *rec)
2206 int type_recovered = 0;
2211 * 1. salvage data from the existing file extents and
2212 * punch holes to keep the file extents consistent.
2213 * 2. salvage data from the extent tree
2215 printf("Trying to rebuild inode:%llu\n", rec->ino);
2217 type_recovered = !find_file_type(rec, &filetype);
2220 * Try to determine inode type if type not found.
2222 * For a found regular file extent, it must be FILE.
2223 * For a found dir_item/index, it must be DIR.
2225 * For an undetermined one, use FILE as a fallback.
2228 * 1. If a found extent in the extent tree belongs to it, it must be FILE.
2229 * Needs an extra hook in the extent tree scan.
2230 * 2. If a backref (inode_index/item is already handled) to it is found,
2232 * Need a new inode-inode ref structure to allow searching for that.
2234 if (!type_recovered) {
2235 if (rec->found_file_extent &&
2236 find_normal_file_extent(root, rec->ino)) {
2238 filetype = BTRFS_FT_REG_FILE;
2239 } else if (rec->found_dir_item) {
2241 filetype = BTRFS_FT_DIR;
2243 printf("Can't determint the filetype for inode %llu, assume it is a normal file\n",
2246 filetype = BTRFS_FT_REG_FILE;
2250 ret = btrfs_new_inode(trans, root, rec->ino,
2251 mode | btrfs_type_to_imode(filetype));
2256 * Here the inode rebuild is done; we only rebuild the inode item and
2257 * don't repair the nlink (e.g. moving to lost+found).
2258 * That is the job of the nlink repair.
2260 * We just fill the record and return.
2262 rec->found_dir_item = 1;
2263 rec->imode = mode | btrfs_type_to_imode(filetype);
2265 rec->errors &= ~I_ERR_NO_INODE_ITEM;
2266 /* Ensure the inode_nlinks repair function will be called */
2267 rec->errors |= I_ERR_LINK_COUNT_WRONG;
2272 static int try_repair_inode(struct btrfs_root *root, struct inode_record *rec)
2274 struct btrfs_trans_handle *trans;
2275 struct btrfs_path *path;
2278 if (!(rec->errors & (I_ERR_DIR_ISIZE_WRONG |
2279 I_ERR_NO_ORPHAN_ITEM |
2280 I_ERR_LINK_COUNT_WRONG |
2281 I_ERR_NO_INODE_ITEM)))
2284 path = btrfs_alloc_path();
2289 * For nlink repair, it may create a dir and add a link, so reserve:
2290 * 2 for the parent (256)'s dir_index and dir_item
2291 * 2 for lost+found dir's inode_item and inode_ref
2292 * 1 for the new inode_ref of the file
2293 * 2 for lost+found dir's dir_index and dir_item for the file
2295 trans = btrfs_start_transaction(root, 7);
2296 if (IS_ERR(trans)) {
2297 btrfs_free_path(path);
2298 return PTR_ERR(trans);
2301 if (rec->errors & I_ERR_NO_INODE_ITEM)
2302 ret = repair_inode_no_item(trans, root, path, rec);
2303 if (!ret && rec->errors & I_ERR_DIR_ISIZE_WRONG)
2304 ret = repair_inode_isize(trans, root, path, rec);
2305 if (!ret && rec->errors & I_ERR_NO_ORPHAN_ITEM)
2306 ret = repair_inode_orphan_item(trans, root, path, rec);
2307 if (!ret && rec->errors & I_ERR_LINK_COUNT_WRONG)
2308 ret = repair_inode_nlinks(trans, root, path, rec);
2309 btrfs_commit_transaction(trans, root);
2310 btrfs_free_path(path);
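/*
 * check_inode_recs() is the final pass over one root's inode cache: it
 * first repairs backrefs (rescanning in stages as needed), verifies the
 * root directory, runs try_repair_inode() on damaged records, and prints
 * any remaining per-inode and per-backref errors.
 */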
2314 static int check_inode_recs(struct btrfs_root *root,
2315 struct cache_tree *inode_cache)
2317 struct cache_extent *cache;
2318 struct ptr_node *node;
2319 struct inode_record *rec;
2320 struct inode_backref *backref;
2325 u64 root_dirid = btrfs_root_dirid(&root->root_item);
2327 if (btrfs_root_refs(&root->root_item) == 0) {
2328 if (!cache_tree_empty(inode_cache))
2329 fprintf(stderr, "warning line %d\n", __LINE__);
2334 * We need to record the highest inode number for the later 'lost+found'
2336 * dir creation. We must select an ino not used/referred to by any existing
2337 * inode, or the 'lost+found' ino may be a missing ino in a corrupted leaf,
2338 * which may cause the 'lost+found' dir to have wrong nlinks.
2340 cache = last_cache_extent(inode_cache);
2342 node = container_of(cache, struct ptr_node, cache);
2344 if (rec->ino > root->highest_inode)
2345 root->highest_inode = rec->ino;
2349 * We need to repair backrefs first because we could change some of the
2350 * errors in the inode recs.
2352 * We also need to go through and delete invalid backrefs first and then
2353 * add the correct ones second. We do this because we may get EEXIST
2354 * when adding back the correct index because we hadn't yet deleted the
2357 * For example, if we were missing a dir index then the directory's
2358 * isize would be wrong, so if we fixed the isize to what we thought it
2359 * would be and then fixed the backref we'd still have an invalid fs, so
2360 * we need to add back the dir index and then check to see if the isize
2365 if (stage == 3 && !err)
2368 cache = search_cache_extent(inode_cache, 0);
2369 while (repair && cache) {
2370 node = container_of(cache, struct ptr_node, cache);
2372 cache = next_cache_extent(cache);
2374 /* Need to free everything up and rescan */
2376 remove_cache_extent(inode_cache, &node->cache);
2378 free_inode_rec(rec);
2382 if (list_empty(&rec->backrefs))
2385 ret = repair_inode_backrefs(root, rec, inode_cache,
2399 rec = get_inode_rec(inode_cache, root_dirid, 0);
2401 ret = check_root_dir(rec);
2403 fprintf(stderr, "root %llu root dir %llu error\n",
2404 (unsigned long long)root->root_key.objectid,
2405 (unsigned long long)root_dirid);
2406 print_inode_error(root, rec);
2411 struct btrfs_trans_handle *trans;
2413 trans = btrfs_start_transaction(root, 1);
2414 if (IS_ERR(trans)) {
2415 err = PTR_ERR(trans);
2420 "root %llu missing its root dir, recreating\n",
2421 (unsigned long long)root->objectid);
2423 ret = btrfs_make_root_dir(trans, root, root_dirid);
2426 btrfs_commit_transaction(trans, root);
2430 fprintf(stderr, "root %llu root dir %llu not found\n",
2431 (unsigned long long)root->root_key.objectid,
2432 (unsigned long long)root_dirid);
2436 cache = search_cache_extent(inode_cache, 0);
2439 node = container_of(cache, struct ptr_node, cache);
2441 remove_cache_extent(inode_cache, &node->cache);
2443 if (rec->ino == root_dirid ||
2444 rec->ino == BTRFS_ORPHAN_OBJECTID) {
2445 free_inode_rec(rec);
2449 if (rec->errors & I_ERR_NO_ORPHAN_ITEM) {
2450 ret = check_orphan_item(root, rec->ino);
2452 rec->errors &= ~I_ERR_NO_ORPHAN_ITEM;
2453 if (can_free_inode_rec(rec)) {
2454 free_inode_rec(rec);
2459 if (!rec->found_inode_item)
2460 rec->errors |= I_ERR_NO_INODE_ITEM;
2461 if (rec->found_link != rec->nlink)
2462 rec->errors |= I_ERR_LINK_COUNT_WRONG;
2464 ret = try_repair_inode(root, rec);
2465 if (ret == 0 && can_free_inode_rec(rec)) {
2466 free_inode_rec(rec);
2472 if (!(repair && ret == 0))
2474 print_inode_error(root, rec);
2475 list_for_each_entry(backref, &rec->backrefs, list) {
2476 if (!backref->found_dir_item)
2477 backref->errors |= REF_ERR_NO_DIR_ITEM;
2478 if (!backref->found_dir_index)
2479 backref->errors |= REF_ERR_NO_DIR_INDEX;
2480 if (!backref->found_inode_ref)
2481 backref->errors |= REF_ERR_NO_INODE_REF;
2482 fprintf(stderr, "\tunresolved ref dir %llu index %llu"
2483 " namelen %u name %s filetype %d errors %x",
2484 (unsigned long long)backref->dir,
2485 (unsigned long long)backref->index,
2486 backref->namelen, backref->name,
2487 backref->filetype, backref->errors);
2488 print_ref_error(backref->errors);
2490 free_inode_rec(rec);
2492 return (error > 0) ? -1 : 0;
2495 static struct root_record *get_root_rec(struct cache_tree *root_cache,
2498 struct cache_extent *cache;
2499 struct root_record *rec = NULL;
2502 cache = lookup_cache_extent(root_cache, objectid, 1);
2504 rec = container_of(cache, struct root_record, cache);
2506 rec = calloc(1, sizeof(*rec));
2507 rec->objectid = objectid;
2508 INIT_LIST_HEAD(&rec->backrefs);
2509 rec->cache.start = objectid;
2510 rec->cache.size = 1;
2512 ret = insert_cache_extent(root_cache, &rec->cache);
2518 static struct root_backref *get_root_backref(struct root_record *rec,
2519 u64 ref_root, u64 dir, u64 index,
2520 const char *name, int namelen)
2522 struct root_backref *backref;
2524 list_for_each_entry(backref, &rec->backrefs, list) {
2525 if (backref->ref_root != ref_root || backref->dir != dir ||
2526 backref->namelen != namelen)
2528 if (memcmp(name, backref->name, namelen))
2533 backref = malloc(sizeof(*backref) + namelen + 1);
2534 memset(backref, 0, sizeof(*backref));
2535 backref->ref_root = ref_root;
2537 backref->index = index;
2538 backref->namelen = namelen;
2539 memcpy(backref->name, name, namelen);
2540 backref->name[namelen] = '\0';
2541 list_add_tail(&backref->list, &rec->backrefs);
2545 static void free_root_record(struct cache_extent *cache)
2547 struct root_record *rec;
2548 struct root_backref *backref;
2550 rec = container_of(cache, struct root_record, cache);
2551 while (!list_empty(&rec->backrefs)) {
2552 backref = list_entry(rec->backrefs.next,
2553 struct root_backref, list);
2554 list_del(&backref->list);
2561 FREE_EXTENT_CACHE_BASED_TREE(root_recs, free_root_record);
2563 static int add_root_backref(struct cache_tree *root_cache,
2564 u64 root_id, u64 ref_root, u64 dir, u64 index,
2565 const char *name, int namelen,
2566 int item_type, int errors)
2568 struct root_record *rec;
2569 struct root_backref *backref;
2571 rec = get_root_rec(root_cache, root_id);
2572 backref = get_root_backref(rec, ref_root, dir, index, name, namelen);
2574 backref->errors |= errors;
2576 if (item_type != BTRFS_DIR_ITEM_KEY) {
2577 if (backref->found_dir_index || backref->found_back_ref ||
2578 backref->found_forward_ref) {
2579 if (backref->index != index)
2580 backref->errors |= REF_ERR_INDEX_UNMATCH;
2582 backref->index = index;
2586 if (item_type == BTRFS_DIR_ITEM_KEY) {
2587 if (backref->found_forward_ref)
2589 backref->found_dir_item = 1;
2590 } else if (item_type == BTRFS_DIR_INDEX_KEY) {
2591 backref->found_dir_index = 1;
2592 } else if (item_type == BTRFS_ROOT_REF_KEY) {
2593 if (backref->found_forward_ref)
2594 backref->errors |= REF_ERR_DUP_ROOT_REF;
2595 else if (backref->found_dir_item)
2597 backref->found_forward_ref = 1;
2598 } else if (item_type == BTRFS_ROOT_BACKREF_KEY) {
2599 if (backref->found_back_ref)
2600 backref->errors |= REF_ERR_DUP_ROOT_BACKREF;
2601 backref->found_back_ref = 1;
2606 if (backref->found_forward_ref && backref->found_dir_item)
2607 backref->reachable = 1;
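/*
 * In short: a root backref only counts as reachable once both halves have
 * been seen -- the ROOT_REF/ROOT_BACKREF item in the tree root and the dir
 * item that actually names the subvolume in its parent directory.
 * check_root_refs() later prunes refs whose referencing root is itself
 * unreachable.
 */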
2611 static int merge_root_recs(struct btrfs_root *root,
2612 struct cache_tree *src_cache,
2613 struct cache_tree *dst_cache)
2615 struct cache_extent *cache;
2616 struct ptr_node *node;
2617 struct inode_record *rec;
2618 struct inode_backref *backref;
2621 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2622 free_inode_recs_tree(src_cache);
2627 cache = search_cache_extent(src_cache, 0);
2630 node = container_of(cache, struct ptr_node, cache);
2632 remove_cache_extent(src_cache, &node->cache);
2635 ret = is_child_root(root, root->objectid, rec->ino);
2641 list_for_each_entry(backref, &rec->backrefs, list) {
2642 BUG_ON(backref->found_inode_ref);
2643 if (backref->found_dir_item)
2644 add_root_backref(dst_cache, rec->ino,
2645 root->root_key.objectid, backref->dir,
2646 backref->index, backref->name,
2647 backref->namelen, BTRFS_DIR_ITEM_KEY,
2649 if (backref->found_dir_index)
2650 add_root_backref(dst_cache, rec->ino,
2651 root->root_key.objectid, backref->dir,
2652 backref->index, backref->name,
2653 backref->namelen, BTRFS_DIR_INDEX_KEY,
2657 free_inode_rec(rec);
2664 static int check_root_refs(struct btrfs_root *root,
2665 struct cache_tree *root_cache)
2667 struct root_record *rec;
2668 struct root_record *ref_root;
2669 struct root_backref *backref;
2670 struct cache_extent *cache;
2676 rec = get_root_rec(root_cache, BTRFS_FS_TREE_OBJECTID);
2679	/* fixme: this cannot detect circular references */
2682 cache = search_cache_extent(root_cache, 0);
2686 rec = container_of(cache, struct root_record, cache);
2687 cache = next_cache_extent(cache);
2689 if (rec->found_ref == 0)
2692 list_for_each_entry(backref, &rec->backrefs, list) {
2693 if (!backref->reachable)
2696 ref_root = get_root_rec(root_cache,
2698 if (ref_root->found_ref > 0)
2701 backref->reachable = 0;
2703 if (rec->found_ref == 0)
2709 cache = search_cache_extent(root_cache, 0);
2713 rec = container_of(cache, struct root_record, cache);
2714 cache = next_cache_extent(cache);
2716 if (rec->found_ref == 0 &&
2717 rec->objectid >= BTRFS_FIRST_FREE_OBJECTID &&
2718 rec->objectid <= BTRFS_LAST_FREE_OBJECTID) {
2719 ret = check_orphan_item(root->fs_info->tree_root,
2725 * If we don't have a root item then we likely just have
2726 * a dir item in a snapshot for this root but no actual
2727 * ref key or anything so it's meaningless.
2729 if (!rec->found_root_item)
2732 fprintf(stderr, "fs tree %llu not referenced\n",
2733 (unsigned long long)rec->objectid);
2737 if (rec->found_ref > 0 && !rec->found_root_item)
2739 list_for_each_entry(backref, &rec->backrefs, list) {
2740 if (!backref->found_dir_item)
2741 backref->errors |= REF_ERR_NO_DIR_ITEM;
2742 if (!backref->found_dir_index)
2743 backref->errors |= REF_ERR_NO_DIR_INDEX;
2744 if (!backref->found_back_ref)
2745 backref->errors |= REF_ERR_NO_ROOT_BACKREF;
2746 if (!backref->found_forward_ref)
2747 backref->errors |= REF_ERR_NO_ROOT_REF;
2748 if (backref->reachable && backref->errors)
2755 fprintf(stderr, "fs tree %llu refs %u %s\n",
2756 (unsigned long long)rec->objectid, rec->found_ref,
2757 rec->found_root_item ? "" : "not found");
2759 list_for_each_entry(backref, &rec->backrefs, list) {
2760 if (!backref->reachable)
2762 if (!backref->errors && rec->found_root_item)
2764 fprintf(stderr, "\tunresolved ref root %llu dir %llu"
2765 " index %llu namelen %u name %s errors %x\n",
2766 (unsigned long long)backref->ref_root,
2767 (unsigned long long)backref->dir,
2768 (unsigned long long)backref->index,
2769 backref->namelen, backref->name,
2771 print_ref_error(backref->errors);
2774 return errors > 0 ? 1 : 0;
2777 static int process_root_ref(struct extent_buffer *eb, int slot,
2778 struct btrfs_key *key,
2779 struct cache_tree *root_cache)
2785 struct btrfs_root_ref *ref;
2786 char namebuf[BTRFS_NAME_LEN];
2789 ref = btrfs_item_ptr(eb, slot, struct btrfs_root_ref);
2791 dirid = btrfs_root_ref_dirid(eb, ref);
2792 index = btrfs_root_ref_sequence(eb, ref);
2793 name_len = btrfs_root_ref_name_len(eb, ref);
2795 if (name_len <= BTRFS_NAME_LEN) {
2799 len = BTRFS_NAME_LEN;
2800 error = REF_ERR_NAME_TOO_LONG;
2802 read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
2804 if (key->type == BTRFS_ROOT_REF_KEY) {
2805 add_root_backref(root_cache, key->offset, key->objectid, dirid,
2806 index, namebuf, len, key->type, error);
2808 add_root_backref(root_cache, key->objectid, key->offset, dirid,
2809 index, namebuf, len, key->type, error);
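/*
 * The swapped arguments above follow the on-disk key layout: a ROOT_REF
 * item is keyed (parent root, ROOT_REF, child root) while the matching
 * ROOT_BACKREF item is keyed (child root, ROOT_BACKREF, parent root), so
 * in both cases the backref ends up recorded against the child root with
 * the parent as ref_root.
 */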
2814 static void free_corrupt_block(struct cache_extent *cache)
2816 struct btrfs_corrupt_block *corrupt;
2818 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
2822 FREE_EXTENT_CACHE_BASED_TREE(corrupt_blocks, free_corrupt_block);
2825 * Repair the btree of the given root.
2827 * The fix is to remove the node key in the corrupt_blocks cache_tree
2828 * and rebalance the tree.
2829 * After the fix, the btree should be writable.
2831 static int repair_btree(struct btrfs_root *root,
2832 struct cache_tree *corrupt_blocks)
2834 struct btrfs_trans_handle *trans;
2835 struct btrfs_path *path;
2836 struct btrfs_corrupt_block *corrupt;
2837 struct cache_extent *cache;
2838 struct btrfs_key key;
2843 if (cache_tree_empty(corrupt_blocks))
2846 path = btrfs_alloc_path();
2850 trans = btrfs_start_transaction(root, 1);
2851 if (IS_ERR(trans)) {
2852 ret = PTR_ERR(trans);
2853 fprintf(stderr, "Error starting transaction: %s\n",
2857 cache = first_cache_extent(corrupt_blocks);
2859 corrupt = container_of(cache, struct btrfs_corrupt_block,
2861 level = corrupt->level;
2862 path->lowest_level = level;
2863 key.objectid = corrupt->key.objectid;
2864 key.type = corrupt->key.type;
2865 key.offset = corrupt->key.offset;
2868		 * Here we don't want to do any tree balancing, since it may
2869		 * cause a balance with a corrupted sibling leaf/node,
2870		 * so ins_len is set to 0 here.
2871		 * Balancing will be done after all corrupt nodes/leaves are deleted.
2873 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2876 offset = btrfs_node_blockptr(path->nodes[level],
2877 path->slots[level]);
2879 /* Remove the ptr */
2880 ret = btrfs_del_ptr(trans, root, path, level,
2881 path->slots[level]);
2885		 * Remove the corresponding extent;
2886		 * the return value is not checked.
2888 btrfs_release_path(path);
2889 ret = btrfs_free_extent(trans, root, offset, root->nodesize,
2890 0, root->root_key.objectid,
2892 cache = next_cache_extent(cache);
2895 /* Balance the btree using btrfs_search_slot() */
2896 cache = first_cache_extent(corrupt_blocks);
2898 corrupt = container_of(cache, struct btrfs_corrupt_block,
2900 memcpy(&key, &corrupt->key, sizeof(key));
2901 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2904		/* the return value will always be > 0 since it won't find the item */
2906 btrfs_release_path(path);
2907 cache = next_cache_extent(cache);
2910 btrfs_commit_transaction(trans, root);
2912 btrfs_free_path(path);
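/*
 * Note the two phases above: the first loop deletes the pointers to the
 * corrupted blocks with ins_len == 0 so no rebalancing can pull a corrupt
 * sibling into a CoW, and then frees the corresponding extents; the second
 * loop re-runs btrfs_search_slot() with ins_len == -1 purely for its
 * balancing side effect, now that the corrupt blocks are no longer
 * reachable.
 */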
2916 static void print_orphan_data_extents(struct list_head *orphan_extents,
2919 struct orphan_data_extent *orphan;
2921 if (list_empty(orphan_extents))
2923 printf("The following data extent is lost in tree %llu:\n",
2925 list_for_each_entry(orphan, orphan_extents, list) {
2926 printf("\tinode: %llu, offset:%llu, disk_bytenr: %llu, disk_len: %llu\n",
2927 orphan->objectid, orphan->offset, orphan->disk_bytenr,
2932 static void free_orphan_data_extents(struct list_head *orphan_extents)
2934 struct orphan_data_extent *orphan;
2936 while (!list_empty(orphan_extents)) {
2937 orphan = list_entry(orphan_extents->next,
2938 struct orphan_data_extent, list);
2939 list_del(&orphan->list);
2944 static int check_fs_root(struct btrfs_root *root,
2945 struct cache_tree *root_cache,
2946 struct walk_control *wc)
2952 struct btrfs_path path;
2953 struct shared_node root_node;
2954 struct root_record *rec;
2955 struct btrfs_root_item *root_item = &root->root_item;
2956 struct cache_tree corrupt_blocks;
2957 enum btrfs_tree_block_status status;
2960 * Reuse the corrupt_block cache tree to record corrupted tree block
2962	 * Unlike the usage in the extent tree check, here we do it on a per
2963	 * fs/subvol tree basis.
2965 cache_tree_init(&corrupt_blocks);
2966 root->fs_info->corrupt_blocks = &corrupt_blocks;
2967 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2968 rec = get_root_rec(root_cache, root->root_key.objectid);
2969 if (btrfs_root_refs(root_item) > 0)
2970 rec->found_root_item = 1;
2973 btrfs_init_path(&path);
2974 memset(&root_node, 0, sizeof(root_node));
2975 cache_tree_init(&root_node.root_cache);
2976 cache_tree_init(&root_node.inode_cache);
2978 level = btrfs_header_level(root->node);
2979 memset(wc->nodes, 0, sizeof(wc->nodes));
2980 wc->nodes[level] = &root_node;
2981 wc->active_node = level;
2982 wc->root_level = level;
2984	/* We may not have checked the root block, let's do that now */
2985 if (btrfs_is_leaf(root->node))
2986 status = btrfs_check_leaf(root, NULL, root->node);
2988 status = btrfs_check_node(root, NULL, root->node);
2989 if (status != BTRFS_TREE_BLOCK_CLEAN)
2992 if (btrfs_root_refs(root_item) > 0 ||
2993 btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2994 path.nodes[level] = root->node;
2995 extent_buffer_get(root->node);
2996 path.slots[level] = 0;
2998 struct btrfs_key key;
2999 struct btrfs_disk_key found_key;
3001 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3002 level = root_item->drop_level;
3003 path.lowest_level = level;
3004 wret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
3007 btrfs_node_key(path.nodes[level], &found_key,
3009 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3010 sizeof(found_key)));
3014 wret = walk_down_tree(root, &path, wc, &level);
3020 wret = walk_up_tree(root, &path, wc, &level);
3027 btrfs_release_path(&path);
3029 if (!cache_tree_empty(&corrupt_blocks)) {
3030 struct cache_extent *cache;
3031 struct btrfs_corrupt_block *corrupt;
3033 printf("The following tree block(s) is corrupted in tree %llu:\n",
3034 root->root_key.objectid);
3035 cache = first_cache_extent(&corrupt_blocks);
3037 corrupt = container_of(cache,
3038 struct btrfs_corrupt_block,
3040 printf("\ttree block bytenr: %llu, level: %d, node key: (%llu, %u, %llu)\n",
3041 cache->start, corrupt->level,
3042 corrupt->key.objectid, corrupt->key.type,
3043 corrupt->key.offset);
3044 cache = next_cache_extent(cache);
3047 printf("Try to repair the btree for root %llu\n",
3048 root->root_key.objectid);
3049 ret = repair_btree(root, &corrupt_blocks);
3051 fprintf(stderr, "Failed to repair btree: %s\n",
3054 printf("Btree for root %llu is fixed\n",
3055 root->root_key.objectid);
3059 err = merge_root_recs(root, &root_node.root_cache, root_cache);
3063 if (root_node.current) {
3064 root_node.current->checked = 1;
3065 maybe_free_inode_rec(&root_node.inode_cache,
3069 err = check_inode_recs(root, &root_node.inode_cache);
3073 free_corrupt_blocks_tree(&corrupt_blocks);
3074 root->fs_info->corrupt_blocks = NULL;
3075 print_orphan_data_extents(&root->orphan_data_extents, root->objectid);
3076 free_orphan_data_extents(&root->orphan_data_extents);
3080 static int fs_root_objectid(u64 objectid)
3082 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
3083 objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3085 return is_fstree(objectid);
3088 static int check_fs_roots(struct btrfs_root *root,
3089 struct cache_tree *root_cache)
3091 struct btrfs_path path;
3092 struct btrfs_key key;
3093 struct walk_control wc;
3094 struct extent_buffer *leaf, *tree_node;
3095 struct btrfs_root *tmp_root;
3096 struct btrfs_root *tree_root = root->fs_info->tree_root;
3101 * Just in case we made any changes to the extent tree that weren't
3102 * reflected into the free space cache yet.
3105 reset_cached_block_groups(root->fs_info);
3106 memset(&wc, 0, sizeof(wc));
3107 cache_tree_init(&wc.shared);
3108 btrfs_init_path(&path);
3113 key.type = BTRFS_ROOT_ITEM_KEY;
3114 ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
3119 tree_node = tree_root->node;
3121 if (tree_node != tree_root->node) {
3122 free_root_recs_tree(root_cache);
3123 btrfs_release_path(&path);
3126 leaf = path.nodes[0];
3127 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
3128 ret = btrfs_next_leaf(tree_root, &path);
3134 leaf = path.nodes[0];
3136 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
3137 if (key.type == BTRFS_ROOT_ITEM_KEY &&
3138 fs_root_objectid(key.objectid)) {
3139 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
3140 tmp_root = btrfs_read_fs_root_no_cache(
3141 root->fs_info, &key);
3143 key.offset = (u64)-1;
3144 tmp_root = btrfs_read_fs_root(
3145 root->fs_info, &key);
3147 if (IS_ERR(tmp_root)) {
3151 ret = check_fs_root(tmp_root, root_cache, &wc);
3152 if (ret == -EAGAIN) {
3153 free_root_recs_tree(root_cache);
3154 btrfs_release_path(&path);
3159 if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
3160 btrfs_free_fs_root(tmp_root);
3161 } else if (key.type == BTRFS_ROOT_REF_KEY ||
3162 key.type == BTRFS_ROOT_BACKREF_KEY) {
3163 process_root_ref(leaf, path.slots[0], &key,
3170 btrfs_release_path(&path);
3172 free_extent_cache_tree(&wc.shared);
3173 if (!cache_tree_empty(&wc.shared))
3174 fprintf(stderr, "warning line %d\n", __LINE__);
3179 static int all_backpointers_checked(struct extent_record *rec, int print_errs)
3181 struct list_head *cur = rec->backrefs.next;
3182 struct extent_backref *back;
3183 struct tree_backref *tback;
3184 struct data_backref *dback;
3188 while(cur != &rec->backrefs) {
3189 back = list_entry(cur, struct extent_backref, list);
3191 if (!back->found_extent_tree) {
3195 if (back->is_data) {
3196 dback = (struct data_backref *)back;
3197 fprintf(stderr, "Backref %llu %s %llu"
3198 " owner %llu offset %llu num_refs %lu"
3199 " not found in extent tree\n",
3200 (unsigned long long)rec->start,
3201 back->full_backref ?
3203 back->full_backref ?
3204 (unsigned long long)dback->parent:
3205 (unsigned long long)dback->root,
3206 (unsigned long long)dback->owner,
3207 (unsigned long long)dback->offset,
3208 (unsigned long)dback->num_refs);
3210 tback = (struct tree_backref *)back;
3211 fprintf(stderr, "Backref %llu parent %llu"
3212 " root %llu not found in extent tree\n",
3213 (unsigned long long)rec->start,
3214 (unsigned long long)tback->parent,
3215 (unsigned long long)tback->root);
3218 if (!back->is_data && !back->found_ref) {
3222 tback = (struct tree_backref *)back;
3223 fprintf(stderr, "Backref %llu %s %llu not referenced back %p\n",
3224 (unsigned long long)rec->start,
3225 back->full_backref ? "parent" : "root",
3226 back->full_backref ?
3227 (unsigned long long)tback->parent :
3228 (unsigned long long)tback->root, back);
3230 if (back->is_data) {
3231 dback = (struct data_backref *)back;
3232 if (dback->found_ref != dback->num_refs) {
3236 fprintf(stderr, "Incorrect local backref count"
3237 " on %llu %s %llu owner %llu"
3238 " offset %llu found %u wanted %u back %p\n",
3239 (unsigned long long)rec->start,
3240 back->full_backref ?
3242 back->full_backref ?
3243 (unsigned long long)dback->parent:
3244 (unsigned long long)dback->root,
3245 (unsigned long long)dback->owner,
3246 (unsigned long long)dback->offset,
3247 dback->found_ref, dback->num_refs, back);
3249 if (dback->disk_bytenr != rec->start) {
3253 fprintf(stderr, "Backref disk bytenr does not"
3254 " match extent record, bytenr=%llu, "
3255 "ref bytenr=%llu\n",
3256 (unsigned long long)rec->start,
3257 (unsigned long long)dback->disk_bytenr);
3260 if (dback->bytes != rec->nr) {
3264 fprintf(stderr, "Backref bytes do not match "
3265 "extent backref, bytenr=%llu, ref "
3266 "bytes=%llu, backref bytes=%llu\n",
3267 (unsigned long long)rec->start,
3268 (unsigned long long)rec->nr,
3269 (unsigned long long)dback->bytes);
3272 if (!back->is_data) {
3275 dback = (struct data_backref *)back;
3276 found += dback->found_ref;
3279 if (found != rec->refs) {
3283 fprintf(stderr, "Incorrect global backref count "
3284 "on %llu found %llu wanted %llu\n",
3285 (unsigned long long)rec->start,
3286 (unsigned long long)found,
3287 (unsigned long long)rec->refs);
3293 static int free_all_extent_backrefs(struct extent_record *rec)
3295 struct extent_backref *back;
3296 struct list_head *cur;
3297 while (!list_empty(&rec->backrefs)) {
3298 cur = rec->backrefs.next;
3299 back = list_entry(cur, struct extent_backref, list);
3306 static void free_extent_record_cache(struct btrfs_fs_info *fs_info,
3307 struct cache_tree *extent_cache)
3309 struct cache_extent *cache;
3310 struct extent_record *rec;
3313 cache = first_cache_extent(extent_cache);
3316 rec = container_of(cache, struct extent_record, cache);
3317 btrfs_unpin_extent(fs_info, rec->start, rec->max_size);
3318 remove_cache_extent(extent_cache, cache);
3319 free_all_extent_backrefs(rec);
3324 static int maybe_free_extent_rec(struct cache_tree *extent_cache,
3325 struct extent_record *rec)
3327 if (rec->content_checked && rec->owner_ref_checked &&
3328 rec->extent_item_refs == rec->refs && rec->refs > 0 &&
3329 rec->num_duplicates == 0 && !all_backpointers_checked(rec, 0)) {
3330 remove_cache_extent(extent_cache, &rec->cache);
3331 free_all_extent_backrefs(rec);
3332 list_del_init(&rec->list);
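/*
 * An extent record can be dropped from the cache as soon as everything about
 * it checks out: content and owner verified, the ref count from the extent
 * item equal to the refs actually found, and no duplicates or unmatched
 * backrefs left.  This helps keep memory usage bounded while the extent tree
 * scan is still running.
 */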
3338 static int check_owner_ref(struct btrfs_root *root,
3339 struct extent_record *rec,
3340 struct extent_buffer *buf)
3342 struct extent_backref *node;
3343 struct tree_backref *back;
3344 struct btrfs_root *ref_root;
3345 struct btrfs_key key;
3346 struct btrfs_path path;
3347 struct extent_buffer *parent;
3352 list_for_each_entry(node, &rec->backrefs, list) {
3355 if (!node->found_ref)
3357 if (node->full_backref)
3359 back = (struct tree_backref *)node;
3360 if (btrfs_header_owner(buf) == back->root)
3363 BUG_ON(rec->is_root);
3365	/* try to find the block by searching the corresponding fs tree */
3366 key.objectid = btrfs_header_owner(buf);
3367 key.type = BTRFS_ROOT_ITEM_KEY;
3368 key.offset = (u64)-1;
3370 ref_root = btrfs_read_fs_root(root->fs_info, &key);
3371 if (IS_ERR(ref_root))
3374 level = btrfs_header_level(buf);
3376 btrfs_item_key_to_cpu(buf, &key, 0);
3378 btrfs_node_key_to_cpu(buf, &key, 0);
3380 btrfs_init_path(&path);
3381 path.lowest_level = level + 1;
3382 ret = btrfs_search_slot(NULL, ref_root, &key, &path, 0, 0);
3386 parent = path.nodes[level + 1];
3387 if (parent && buf->start == btrfs_node_blockptr(parent,
3388 path.slots[level + 1]))
3391 btrfs_release_path(&path);
3392 return found ? 0 : 1;
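/*
 * So the ownership check falls back to the owning fs tree itself: read the
 * root recorded in the block's header, search for the block's first key with
 * lowest_level set one level above the block, and accept the owner if the
 * parent node at that level really points at buf->start.
 */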
3395 static int is_extent_tree_record(struct extent_record *rec)
3397 struct list_head *cur = rec->backrefs.next;
3398 struct extent_backref *node;
3399 struct tree_backref *back;
3402 while(cur != &rec->backrefs) {
3403 node = list_entry(cur, struct extent_backref, list);
3407 back = (struct tree_backref *)node;
3408 if (node->full_backref)
3410 if (back->root == BTRFS_EXTENT_TREE_OBJECTID)
3417 static int record_bad_block_io(struct btrfs_fs_info *info,
3418 struct cache_tree *extent_cache,
3421 struct extent_record *rec;
3422 struct cache_extent *cache;
3423 struct btrfs_key key;
3425 cache = lookup_cache_extent(extent_cache, start, len);
3429 rec = container_of(cache, struct extent_record, cache);
3430 if (!is_extent_tree_record(rec))
3433 btrfs_disk_key_to_cpu(&key, &rec->parent_key);
3434 return btrfs_add_corrupt_extent_record(info, &key, start, len, 0);
3437 static int swap_values(struct btrfs_root *root, struct btrfs_path *path,
3438 struct extent_buffer *buf, int slot)
3440 if (btrfs_header_level(buf)) {
3441 struct btrfs_key_ptr ptr1, ptr2;
3443 read_extent_buffer(buf, &ptr1, btrfs_node_key_ptr_offset(slot),
3444 sizeof(struct btrfs_key_ptr));
3445 read_extent_buffer(buf, &ptr2,
3446 btrfs_node_key_ptr_offset(slot + 1),
3447 sizeof(struct btrfs_key_ptr));
3448 write_extent_buffer(buf, &ptr1,
3449 btrfs_node_key_ptr_offset(slot + 1),
3450 sizeof(struct btrfs_key_ptr));
3451 write_extent_buffer(buf, &ptr2,
3452 btrfs_node_key_ptr_offset(slot),
3453 sizeof(struct btrfs_key_ptr));
3455 struct btrfs_disk_key key;
3456 btrfs_node_key(buf, &key, 0);
3457 btrfs_fixup_low_keys(root, path, &key,
3458 btrfs_header_level(buf) + 1);
3461 struct btrfs_item *item1, *item2;
3462 struct btrfs_key k1, k2;
3463 char *item1_data, *item2_data;
3464 u32 item1_offset, item2_offset, item1_size, item2_size;
3466 item1 = btrfs_item_nr(slot);
3467 item2 = btrfs_item_nr(slot + 1);
3468 btrfs_item_key_to_cpu(buf, &k1, slot);
3469 btrfs_item_key_to_cpu(buf, &k2, slot + 1);
3470 item1_offset = btrfs_item_offset(buf, item1);
3471 item2_offset = btrfs_item_offset(buf, item2);
3472 item1_size = btrfs_item_size(buf, item1);
3473 item2_size = btrfs_item_size(buf, item2);
3475 item1_data = malloc(item1_size);
3478 item2_data = malloc(item2_size);
3484 read_extent_buffer(buf, item1_data, item1_offset, item1_size);
3485 read_extent_buffer(buf, item2_data, item2_offset, item2_size);
3487 write_extent_buffer(buf, item1_data, item2_offset, item2_size);
3488 write_extent_buffer(buf, item2_data, item1_offset, item1_size);
3492 btrfs_set_item_offset(buf, item1, item2_offset);
3493 btrfs_set_item_offset(buf, item2, item1_offset);
3494 btrfs_set_item_size(buf, item1, item2_size);
3495 btrfs_set_item_size(buf, item2, item1_size);
3497 path->slots[0] = slot;
3498 btrfs_set_item_key_unsafe(root, path, &k2);
3499 path->slots[0] = slot + 1;
3500 btrfs_set_item_key_unsafe(root, path, &k1);
3505 static int fix_key_order(struct btrfs_trans_handle *trans,
3506 struct btrfs_root *root,
3507 struct btrfs_path *path)
3509 struct extent_buffer *buf;
3510 struct btrfs_key k1, k2;
3512 int level = path->lowest_level;
3515 buf = path->nodes[level];
3516 for (i = 0; i < btrfs_header_nritems(buf) - 1; i++) {
3518 btrfs_node_key_to_cpu(buf, &k1, i);
3519 btrfs_node_key_to_cpu(buf, &k2, i + 1);
3521 btrfs_item_key_to_cpu(buf, &k1, i);
3522 btrfs_item_key_to_cpu(buf, &k2, i + 1);
3524 if (btrfs_comp_cpu_keys(&k1, &k2) < 0)
3526 ret = swap_values(root, path, buf, i);
3529 btrfs_mark_buffer_dirty(buf);
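/*
 * The loop above is essentially a single left-to-right pass that swaps
 * adjacent items whose keys compare out of order.  A minimal standalone
 * sketch of that pass on a plain array (simplified u64 keys instead of
 * struct btrfs_key, no extent_buffer accessors, and no fixup of the
 * parent's low key) might look like this:
 */
#if 0	/* illustrative sketch only, not built with the tool */
struct demo_item {
	unsigned long long key;
	int payload;
};

/* one left-to-right pass swapping adjacent out-of-order items */
static void demo_fix_key_order(struct demo_item *items, int nritems)
{
	int i;

	for (i = 0; i + 1 < nritems; i++) {
		struct demo_item tmp;

		/* pair already ordered, nothing to do */
		if (items[i].key < items[i + 1].key)
			continue;
		/* swap the out-of-order neighbours, as swap_values() does */
		tmp = items[i];
		items[i] = items[i + 1];
		items[i + 1] = tmp;
	}
}
#endif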
3535 static int delete_bogus_item(struct btrfs_trans_handle *trans,
3536 struct btrfs_root *root,
3537 struct btrfs_path *path,
3538 struct extent_buffer *buf, int slot)
3540 struct btrfs_key key;
3541 int nritems = btrfs_header_nritems(buf);
3543 btrfs_item_key_to_cpu(buf, &key, slot);
3545 /* These are all the keys we can deal with missing. */
3546 if (key.type != BTRFS_DIR_INDEX_KEY &&
3547 key.type != BTRFS_EXTENT_ITEM_KEY &&
3548 key.type != BTRFS_METADATA_ITEM_KEY &&
3549 key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3550 key.type != BTRFS_EXTENT_DATA_REF_KEY)
3553 printf("Deleting bogus item [%llu,%u,%llu] at slot %d on block %llu\n",
3554 (unsigned long long)key.objectid, key.type,
3555 (unsigned long long)key.offset, slot, buf->start);
3556 memmove_extent_buffer(buf, btrfs_item_nr_offset(slot),
3557 btrfs_item_nr_offset(slot + 1),
3558 sizeof(struct btrfs_item) *
3559 (nritems - slot - 1));
3560 btrfs_set_header_nritems(buf, nritems - 1);
3562 struct btrfs_disk_key disk_key;
3564 btrfs_item_key(buf, &disk_key, 0);
3565 btrfs_fixup_low_keys(root, path, &disk_key, 1);
3567 btrfs_mark_buffer_dirty(buf);
3571 static int fix_item_offset(struct btrfs_trans_handle *trans,
3572 struct btrfs_root *root,
3573 struct btrfs_path *path)
3575 struct extent_buffer *buf;
3579 /* We should only get this for leaves */
3580 BUG_ON(path->lowest_level);
3581 buf = path->nodes[0];
3583 for (i = 0; i < btrfs_header_nritems(buf); i++) {
3584 unsigned int shift = 0, offset;
3586 if (i == 0 && btrfs_item_end_nr(buf, i) !=
3587 BTRFS_LEAF_DATA_SIZE(root)) {
3588 if (btrfs_item_end_nr(buf, i) >
3589 BTRFS_LEAF_DATA_SIZE(root)) {
3590 ret = delete_bogus_item(trans, root, path,
3594 fprintf(stderr, "item is off the end of the "
3595 "leaf, can't fix\n");
3599 shift = BTRFS_LEAF_DATA_SIZE(root) -
3600 btrfs_item_end_nr(buf, i);
3601 } else if (i > 0 && btrfs_item_end_nr(buf, i) !=
3602 btrfs_item_offset_nr(buf, i - 1)) {
3603 if (btrfs_item_end_nr(buf, i) >
3604 btrfs_item_offset_nr(buf, i - 1)) {
3605 ret = delete_bogus_item(trans, root, path,
3609 fprintf(stderr, "items overlap, can't fix\n");
3613 shift = btrfs_item_offset_nr(buf, i - 1) -
3614 btrfs_item_end_nr(buf, i);
3619 printf("Shifting item nr %d by %u bytes in block %llu\n",
3620 i, shift, (unsigned long long)buf->start);
3621 offset = btrfs_item_offset_nr(buf, i);
3622 memmove_extent_buffer(buf,
3623 btrfs_leaf_data(buf) + offset + shift,
3624 btrfs_leaf_data(buf) + offset,
3625 btrfs_item_size_nr(buf, i));
3626 btrfs_set_item_offset(buf, btrfs_item_nr(i),
3628 btrfs_mark_buffer_dirty(buf);
3632 * We may have moved things, in which case we want to exit so we don't
3633 * write those changes out. Once we have proper abort functionality in
3634 * progs this can be changed to something nicer.
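/*
 * Concretely (with made-up numbers): leaf item data is packed at the end of
 * the leaf, so item 0 must end exactly at BTRFS_LEAF_DATA_SIZE() and every
 * later item must end exactly where the previous item's data begins.  If the
 * data area were 4000 bytes and item 0 had size 100 but a recorded offset of
 * 3880 (ending at 3980 instead of 4000), the loop above would memmove those
 * 100 bytes up by shift = 4000 - 3980 = 20 and rewrite the offset to
 * 3880 + 20 = 3900.
 */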
3641 * Attempt to fix basic block failures. If we can't fix it for whatever reason
3642 * then just return -EIO.
3644 static int try_to_fix_bad_block(struct btrfs_trans_handle *trans,
3645 struct btrfs_root *root,
3646 struct extent_buffer *buf,
3647 enum btrfs_tree_block_status status)
3649 struct ulist *roots;
3650 struct ulist_node *node;
3651 struct btrfs_root *search_root;
3652 struct btrfs_path *path;
3653 struct ulist_iterator iter;
3654 struct btrfs_key root_key, key;
3657 if (status != BTRFS_TREE_BLOCK_BAD_KEY_ORDER &&
3658 status != BTRFS_TREE_BLOCK_INVALID_OFFSETS)
3661 path = btrfs_alloc_path();
3665 ret = btrfs_find_all_roots(trans, root->fs_info, buf->start,
3668 btrfs_free_path(path);
3672 ULIST_ITER_INIT(&iter);
3673 while ((node = ulist_next(roots, &iter))) {
3674 root_key.objectid = node->val;
3675 root_key.type = BTRFS_ROOT_ITEM_KEY;
3676 root_key.offset = (u64)-1;
3678 search_root = btrfs_read_fs_root(root->fs_info, &root_key);
3684 record_root_in_trans(trans, search_root);
3686 path->lowest_level = btrfs_header_level(buf);
3687 path->skip_check_block = 1;
3688 if (path->lowest_level)
3689 btrfs_node_key_to_cpu(buf, &key, 0);
3691 btrfs_item_key_to_cpu(buf, &key, 0);
3692 ret = btrfs_search_slot(trans, search_root, &key, path, 0, 1);
3697 if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
3698 ret = fix_key_order(trans, search_root, path);
3699 else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
3700 ret = fix_item_offset(trans, search_root, path);
3703 btrfs_release_path(path);
3706 btrfs_free_path(path);
3710 static int check_block(struct btrfs_trans_handle *trans,
3711 struct btrfs_root *root,
3712 struct cache_tree *extent_cache,
3713 struct extent_buffer *buf, u64 flags)
3715 struct extent_record *rec;
3716 struct cache_extent *cache;
3717 struct btrfs_key key;
3718 enum btrfs_tree_block_status status;
3722 cache = lookup_cache_extent(extent_cache, buf->start, buf->len);
3725 rec = container_of(cache, struct extent_record, cache);
3726 rec->generation = btrfs_header_generation(buf);
3728 level = btrfs_header_level(buf);
3729 if (btrfs_header_nritems(buf) > 0) {
3732 btrfs_item_key_to_cpu(buf, &key, 0);
3734 btrfs_node_key_to_cpu(buf, &key, 0);
3736 rec->info_objectid = key.objectid;
3738 rec->info_level = level;
3740 if (btrfs_is_leaf(buf))
3741 status = btrfs_check_leaf(root, &rec->parent_key, buf);
3743 status = btrfs_check_node(root, &rec->parent_key, buf);
3745 if (status != BTRFS_TREE_BLOCK_CLEAN) {
3747 status = try_to_fix_bad_block(trans, root, buf,
3749 if (status != BTRFS_TREE_BLOCK_CLEAN) {
3751 fprintf(stderr, "bad block %llu\n",
3752 (unsigned long long)buf->start);
3755 * Signal to callers we need to start the scan over
3756 * again since we'll have cow'ed blocks.
3761 rec->content_checked = 1;
3762 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3763 rec->owner_ref_checked = 1;
3765 ret = check_owner_ref(root, rec, buf);
3767 rec->owner_ref_checked = 1;
3771 maybe_free_extent_rec(extent_cache, rec);
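/*
 * To recap the flags set above: content_checked means the block itself
 * passed btrfs_check_leaf()/btrfs_check_node(); owner_ref_checked is set
 * right away for FULL_BACKREF blocks and otherwise only once
 * check_owner_ref() confirms that the header owner (or one of the recorded
 * backref roots) really points at this block.
 */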
3775 static struct tree_backref *find_tree_backref(struct extent_record *rec,
3776 u64 parent, u64 root)
3778 struct list_head *cur = rec->backrefs.next;
3779 struct extent_backref *node;
3780 struct tree_backref *back;
3782 while(cur != &rec->backrefs) {
3783 node = list_entry(cur, struct extent_backref, list);
3787 back = (struct tree_backref *)node;
3789 if (!node->full_backref)
3791 if (parent == back->parent)
3794 if (node->full_backref)
3796 if (back->root == root)
3803 static struct tree_backref *alloc_tree_backref(struct extent_record *rec,
3804 u64 parent, u64 root)
3806 struct tree_backref *ref = malloc(sizeof(*ref));
3807 memset(&ref->node, 0, sizeof(ref->node));
3809 ref->parent = parent;
3810 ref->node.full_backref = 1;
3813 ref->node.full_backref = 0;
3815 list_add_tail(&ref->node.list, &rec->backrefs);
3820 static struct data_backref *find_data_backref(struct extent_record *rec,
3821 u64 parent, u64 root,
3822 u64 owner, u64 offset,
3824 u64 disk_bytenr, u64 bytes)
3826 struct list_head *cur = rec->backrefs.next;
3827 struct extent_backref *node;
3828 struct data_backref *back;
3830 while(cur != &rec->backrefs) {
3831 node = list_entry(cur, struct extent_backref, list);
3835 back = (struct data_backref *)node;
3837 if (!node->full_backref)
3839 if (parent == back->parent)
3842 if (node->full_backref)
3844 if (back->root == root && back->owner == owner &&
3845 back->offset == offset) {
3846 if (found_ref && node->found_ref &&
3847 (back->bytes != bytes ||
3848 back->disk_bytenr != disk_bytenr))
3857 static struct data_backref *alloc_data_backref(struct extent_record *rec,
3858 u64 parent, u64 root,
3859 u64 owner, u64 offset,
3862 struct data_backref *ref = malloc(sizeof(*ref));
3863 memset(&ref->node, 0, sizeof(ref->node));
3864 ref->node.is_data = 1;
3867 ref->parent = parent;
3870 ref->node.full_backref = 1;
3874 ref->offset = offset;
3875 ref->node.full_backref = 0;
3877 ref->bytes = max_size;
3880 list_add_tail(&ref->node.list, &rec->backrefs);
3881 if (max_size > rec->max_size)
3882 rec->max_size = max_size;
3886 static int add_extent_rec(struct cache_tree *extent_cache,
3887 struct btrfs_key *parent_key, u64 parent_gen,
3888 u64 start, u64 nr, u64 extent_item_refs,
3889 int is_root, int inc_ref, int set_checked,
3890 int metadata, int extent_rec, u64 max_size)
3892 struct extent_record *rec;
3893 struct cache_extent *cache;
3897 cache = lookup_cache_extent(extent_cache, start, nr);
3899 rec = container_of(cache, struct extent_record, cache);
3903 rec->nr = max(nr, max_size);
3906 * We need to make sure to reset nr to whatever the extent
3907 * record says was the real size, this way we can compare it to
3911 if (start != rec->start || rec->found_rec) {
3912 struct extent_record *tmp;
3915 if (list_empty(&rec->list))
3916 list_add_tail(&rec->list,
3917 &duplicate_extents);
3920 * We have to do this song and dance in case we
3921 * find an extent record that falls inside of
3922 * our current extent record but does not have
3923 * the same objectid.
3925 tmp = malloc(sizeof(*tmp));
3929 tmp->max_size = max_size;
3932 tmp->metadata = metadata;
3933 tmp->extent_item_refs = extent_item_refs;
3934 INIT_LIST_HEAD(&tmp->list);
3935 list_add_tail(&tmp->list, &rec->dups);
3936 rec->num_duplicates++;
3943 if (extent_item_refs && !dup) {
3944 if (rec->extent_item_refs) {
3945 fprintf(stderr, "block %llu rec "
3946 "extent_item_refs %llu, passed %llu\n",
3947 (unsigned long long)start,
3948 (unsigned long long)
3949 rec->extent_item_refs,
3950 (unsigned long long)extent_item_refs);
3952 rec->extent_item_refs = extent_item_refs;
3957 rec->content_checked = 1;
3958 rec->owner_ref_checked = 1;
3962 btrfs_cpu_key_to_disk(&rec->parent_key, parent_key);
3964 rec->parent_generation = parent_gen;
3966 if (rec->max_size < max_size)
3967 rec->max_size = max_size;
3969 maybe_free_extent_rec(extent_cache, rec);
3972 rec = malloc(sizeof(*rec));
3974 rec->max_size = max_size;
3975 rec->nr = max(nr, max_size);
3976 rec->found_rec = !!extent_rec;
3977 rec->content_checked = 0;
3978 rec->owner_ref_checked = 0;
3979 rec->num_duplicates = 0;
3980 rec->metadata = metadata;
3981 INIT_LIST_HEAD(&rec->backrefs);
3982 INIT_LIST_HEAD(&rec->dups);
3983 INIT_LIST_HEAD(&rec->list);
3995 if (extent_item_refs)
3996 rec->extent_item_refs = extent_item_refs;
3998 rec->extent_item_refs = 0;
4001 btrfs_cpu_key_to_disk(&rec->parent_key, parent_key);
4003 memset(&rec->parent_key, 0, sizeof(*parent_key));
4006 rec->parent_generation = parent_gen;
4008 rec->parent_generation = 0;
4010 rec->cache.start = start;
4011 rec->cache.size = nr;
4012 ret = insert_cache_extent(extent_cache, &rec->cache);
4016 rec->content_checked = 1;
4017 rec->owner_ref_checked = 1;
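/*
 * Duplicate handling in a nutshell: the cache is indexed by [start, nr), so
 * when a second extent item is found for a range we already track -- either
 * the same start seen twice or an overlapping record with a different
 * start -- a separate struct extent_record is kept on rec->dups and
 * num_duplicates is bumped, leaving the conflict to be dealt with after the
 * whole extent tree has been scanned.
 */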
4022 static int add_tree_backref(struct cache_tree *extent_cache, u64 bytenr,
4023 u64 parent, u64 root, int found_ref)
4025 struct extent_record *rec;
4026 struct tree_backref *back;
4027 struct cache_extent *cache;
4029 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4031 add_extent_rec(extent_cache, NULL, 0, bytenr,
4032 1, 0, 0, 0, 0, 1, 0, 0);
4033 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4038 rec = container_of(cache, struct extent_record, cache);
4039 if (rec->start != bytenr) {
4043 back = find_tree_backref(rec, parent, root);
4045 back = alloc_tree_backref(rec, parent, root);
4048 if (back->node.found_ref) {
4049 fprintf(stderr, "Extent back ref already exists "
4050 "for %llu parent %llu root %llu \n",
4051 (unsigned long long)bytenr,
4052 (unsigned long long)parent,
4053 (unsigned long long)root);
4055 back->node.found_ref = 1;
4057 if (back->node.found_extent_tree) {
4058 fprintf(stderr, "Extent back ref already exists "
4059 "for %llu parent %llu root %llu \n",
4060 (unsigned long long)bytenr,
4061 (unsigned long long)parent,
4062 (unsigned long long)root);
4064 back->node.found_extent_tree = 1;
4066 maybe_free_extent_rec(extent_cache, rec);
4070 static int add_data_backref(struct cache_tree *extent_cache, u64 bytenr,
4071 u64 parent, u64 root, u64 owner, u64 offset,
4072 u32 num_refs, int found_ref, u64 max_size)
4074 struct extent_record *rec;
4075 struct data_backref *back;
4076 struct cache_extent *cache;
4078 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4080 add_extent_rec(extent_cache, NULL, 0, bytenr, 1, 0, 0, 0, 0,
4082 cache = lookup_cache_extent(extent_cache, bytenr, 1);
4087 rec = container_of(cache, struct extent_record, cache);
4088 if (rec->max_size < max_size)
4089 rec->max_size = max_size;
4092 * If found_ref is set then max_size is the real size and must match the
4093 * existing refs. So if we have already found a ref then we need to
4094 * make sure that this ref matches the existing one, otherwise we need
4095 * to add a new backref so we can notice that the backrefs don't match
4096 * and we need to figure out who is telling the truth. This is to
4097 * account for that awful fsync bug I introduced where we'd end up with
4098 * a btrfs_file_extent_item that would have its length include multiple
4099 * prealloc extents or point inside of a prealloc extent.
4101 back = find_data_backref(rec, parent, root, owner, offset, found_ref,
4104 back = alloc_data_backref(rec, parent, root, owner, offset,
4108 BUG_ON(num_refs != 1);
4109 if (back->node.found_ref)
4110 BUG_ON(back->bytes != max_size);
4111 back->node.found_ref = 1;
4112 back->found_ref += 1;
4113 back->bytes = max_size;
4114 back->disk_bytenr = bytenr;
4116 rec->content_checked = 1;
4117 rec->owner_ref_checked = 1;
4119 if (back->node.found_extent_tree) {
4120 fprintf(stderr, "Extent back ref already exists "
4121 "for %llu parent %llu root %llu "
4122 "owner %llu offset %llu num_refs %lu\n",
4123 (unsigned long long)bytenr,
4124 (unsigned long long)parent,
4125 (unsigned long long)root,
4126 (unsigned long long)owner,
4127 (unsigned long long)offset,
4128 (unsigned long)num_refs);
4130 back->num_refs = num_refs;
4131 back->node.found_extent_tree = 1;
4133 maybe_free_extent_rec(extent_cache, rec);
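/*
 * Put differently: references discovered while walking the fs trees come in
 * with found_ref set, carry the real extent size in max_size and bump
 * back->found_ref, while references read from the extent tree come in with
 * found_ref clear and only record num_refs.  all_backpointers_checked()
 * later compares the two counts for every backref.
 */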
4137 static int add_pending(struct cache_tree *pending,
4138 struct cache_tree *seen, u64 bytenr, u32 size)
4141 ret = add_cache_extent(seen, bytenr, size);
4144 add_cache_extent(pending, bytenr, size);
4148 static int pick_next_pending(struct cache_tree *pending,
4149 struct cache_tree *reada,
4150 struct cache_tree *nodes,
4151 u64 last, struct block_info *bits, int bits_nr,
4154 unsigned long node_start = last;
4155 struct cache_extent *cache;
4158 cache = search_cache_extent(reada, 0);
4160 bits[0].start = cache->start;
4161 bits[0].size = cache->size;
4166 if (node_start > 32768)
4167 node_start -= 32768;
4169 cache = search_cache_extent(nodes, node_start);
4171 cache = search_cache_extent(nodes, 0);
4174 cache = search_cache_extent(pending, 0);
4179 bits[ret].start = cache->start;
4180 bits[ret].size = cache->size;
4181 cache = next_cache_extent(cache);
4183 } while (cache && ret < bits_nr);
4189 bits[ret].start = cache->start;
4190 bits[ret].size = cache->size;
4191 cache = next_cache_extent(cache);
4193 } while (cache && ret < bits_nr);
4195 if (bits_nr - ret > 8) {
4196 u64 lookup = bits[0].start + bits[0].size;
4197 struct cache_extent *next;
4198 next = search_cache_extent(pending, lookup);
4200 if (next->start - lookup > 32768)
4202 bits[ret].start = next->start;
4203 bits[ret].size = next->size;
4204 lookup = next->start + next->size;
4208 next = next_cache_extent(next);
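/*
 * The heuristics above try to keep the queued reads roughly sequential:
 * start the batch near the last processed bytenr (backing up 32K so close
 * neighbours are not skipped), and only top the batch up with further
 * pending extents when they begin within 32K of the previous one.
 */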
4216 static void free_chunk_record(struct cache_extent *cache)
4218 struct chunk_record *rec;
4220 rec = container_of(cache, struct chunk_record, cache);
4221 list_del_init(&rec->list);
4222 list_del_init(&rec->dextents);
4226 void free_chunk_cache_tree(struct cache_tree *chunk_cache)
4228 cache_tree_free_extents(chunk_cache, free_chunk_record);
4231 static void free_device_record(struct rb_node *node)
4233 struct device_record *rec;
4235 rec = container_of(node, struct device_record, node);
4239 FREE_RB_BASED_TREE(device_cache, free_device_record);
4241 int insert_block_group_record(struct block_group_tree *tree,
4242 struct block_group_record *bg_rec)
4246 ret = insert_cache_extent(&tree->tree, &bg_rec->cache);
4250 list_add_tail(&bg_rec->list, &tree->block_groups);
4254 static void free_block_group_record(struct cache_extent *cache)
4256 struct block_group_record *rec;
4258 rec = container_of(cache, struct block_group_record, cache);
4259 list_del_init(&rec->list);
4263 void free_block_group_tree(struct block_group_tree *tree)
4265 cache_tree_free_extents(&tree->tree, free_block_group_record);
4268 int insert_device_extent_record(struct device_extent_tree *tree,
4269 struct device_extent_record *de_rec)
4274	 * A device extent is a bit different from the other extents, because
4275	 * extents which belong to different devices may have the
4276	 * same start and size, so we need to use the special extent cache
4277	 * search/insert functions.
4279 ret = insert_cache_extent2(&tree->tree, &de_rec->cache);
4283 list_add_tail(&de_rec->chunk_list, &tree->no_chunk_orphans);
4284 list_add_tail(&de_rec->device_list, &tree->no_device_orphans);
4288 static void free_device_extent_record(struct cache_extent *cache)
4290 struct device_extent_record *rec;
4292 rec = container_of(cache, struct device_extent_record, cache);
4293 if (!list_empty(&rec->chunk_list))
4294 list_del_init(&rec->chunk_list);
4295 if (!list_empty(&rec->device_list))
4296 list_del_init(&rec->device_list);
4300 void free_device_extent_tree(struct device_extent_tree *tree)
4302 cache_tree_free_extents(&tree->tree, free_device_extent_record);
4305 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4306 static int process_extent_ref_v0(struct cache_tree *extent_cache,
4307 struct extent_buffer *leaf, int slot)
4309 struct btrfs_extent_ref_v0 *ref0;
4310 struct btrfs_key key;
4312 btrfs_item_key_to_cpu(leaf, &key, slot);
4313 ref0 = btrfs_item_ptr(leaf, slot, struct btrfs_extent_ref_v0);
4314 if (btrfs_ref_objectid_v0(leaf, ref0) < BTRFS_FIRST_FREE_OBJECTID) {
4315 add_tree_backref(extent_cache, key.objectid, key.offset, 0, 0);
4317 add_data_backref(extent_cache, key.objectid, key.offset, 0,
4318 0, 0, btrfs_ref_count_v0(leaf, ref0), 0, 0);
4324 struct chunk_record *btrfs_new_chunk_record(struct extent_buffer *leaf,
4325 struct btrfs_key *key,
4328 struct btrfs_chunk *ptr;
4329 struct chunk_record *rec;
4332 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4333 num_stripes = btrfs_chunk_num_stripes(leaf, ptr);
4335 rec = malloc(btrfs_chunk_record_size(num_stripes));
4337 fprintf(stderr, "memory allocation failed\n");
4341 memset(rec, 0, btrfs_chunk_record_size(num_stripes));
4343 INIT_LIST_HEAD(&rec->list);
4344 INIT_LIST_HEAD(&rec->dextents);
4347 rec->cache.start = key->offset;
4348 rec->cache.size = btrfs_chunk_length(leaf, ptr);
4350 rec->generation = btrfs_header_generation(leaf);
4352 rec->objectid = key->objectid;
4353 rec->type = key->type;
4354 rec->offset = key->offset;
4356 rec->length = rec->cache.size;
4357 rec->owner = btrfs_chunk_owner(leaf, ptr);
4358 rec->stripe_len = btrfs_chunk_stripe_len(leaf, ptr);
4359 rec->type_flags = btrfs_chunk_type(leaf, ptr);
4360 rec->io_width = btrfs_chunk_io_width(leaf, ptr);
4361 rec->io_align = btrfs_chunk_io_align(leaf, ptr);
4362 rec->sector_size = btrfs_chunk_sector_size(leaf, ptr);
4363 rec->num_stripes = num_stripes;
4364 rec->sub_stripes = btrfs_chunk_sub_stripes(leaf, ptr);
4366 for (i = 0; i < rec->num_stripes; ++i) {
4367 rec->stripes[i].devid =
4368 btrfs_stripe_devid_nr(leaf, ptr, i);
4369 rec->stripes[i].offset =
4370 btrfs_stripe_offset_nr(leaf, ptr, i);
4371 read_extent_buffer(leaf, rec->stripes[i].dev_uuid,
4372 (unsigned long)btrfs_stripe_dev_uuid_nr(ptr, i),
4379 static int process_chunk_item(struct cache_tree *chunk_cache,
4380 struct btrfs_key *key, struct extent_buffer *eb,
4383 struct chunk_record *rec;
4386 rec = btrfs_new_chunk_record(eb, key, slot);
4387 ret = insert_cache_extent(chunk_cache, &rec->cache);
4389 fprintf(stderr, "Chunk[%llu, %llu] existed.\n",
4390 rec->offset, rec->length);
4397 static int process_device_item(struct rb_root *dev_cache,
4398 struct btrfs_key *key, struct extent_buffer *eb, int slot)
4400 struct btrfs_dev_item *ptr;
4401 struct device_record *rec;
4404 ptr = btrfs_item_ptr(eb,
4405 slot, struct btrfs_dev_item);
4407 rec = malloc(sizeof(*rec));
4409 fprintf(stderr, "memory allocation failed\n");
4413 rec->devid = key->offset;
4414 rec->generation = btrfs_header_generation(eb);
4416 rec->objectid = key->objectid;
4417 rec->type = key->type;
4418 rec->offset = key->offset;
4420 rec->devid = btrfs_device_id(eb, ptr);
4421 rec->total_byte = btrfs_device_total_bytes(eb, ptr);
4422 rec->byte_used = btrfs_device_bytes_used(eb, ptr);
4424 ret = rb_insert(dev_cache, &rec->node, device_record_compare);
4426 fprintf(stderr, "Device[%llu] existed.\n", rec->devid);
4433 struct block_group_record *
4434 btrfs_new_block_group_record(struct extent_buffer *leaf, struct btrfs_key *key,
4437 struct btrfs_block_group_item *ptr;
4438 struct block_group_record *rec;
4440 rec = malloc(sizeof(*rec));
4442 fprintf(stderr, "memory allocation failed\n");
4445 memset(rec, 0, sizeof(*rec));
4447 rec->cache.start = key->objectid;
4448 rec->cache.size = key->offset;
4450 rec->generation = btrfs_header_generation(leaf);
4452 rec->objectid = key->objectid;
4453 rec->type = key->type;
4454 rec->offset = key->offset;
4456 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_block_group_item);
4457 rec->flags = btrfs_disk_block_group_flags(leaf, ptr);
4459 INIT_LIST_HEAD(&rec->list);
4464 static int process_block_group_item(struct block_group_tree *block_group_cache,
4465 struct btrfs_key *key,
4466 struct extent_buffer *eb, int slot)
4468 struct block_group_record *rec;
4471 rec = btrfs_new_block_group_record(eb, key, slot);
4472 ret = insert_block_group_record(block_group_cache, rec);
4474 fprintf(stderr, "Block Group[%llu, %llu] existed.\n",
4475 rec->objectid, rec->offset);
4482 struct device_extent_record *
4483 btrfs_new_device_extent_record(struct extent_buffer *leaf,
4484 struct btrfs_key *key, int slot)
4486 struct device_extent_record *rec;
4487 struct btrfs_dev_extent *ptr;
4489 rec = malloc(sizeof(*rec));
4491 fprintf(stderr, "memory allocation failed\n");
4494 memset(rec, 0, sizeof(*rec));
4496 rec->cache.objectid = key->objectid;
4497 rec->cache.start = key->offset;
4499 rec->generation = btrfs_header_generation(leaf);
4501 rec->objectid = key->objectid;
4502 rec->type = key->type;
4503 rec->offset = key->offset;
4505 ptr = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
4506 rec->chunk_objecteid =
4507 btrfs_dev_extent_chunk_objectid(leaf, ptr);
4509 btrfs_dev_extent_chunk_offset(leaf, ptr);
4510 rec->length = btrfs_dev_extent_length(leaf, ptr);
4511 rec->cache.size = rec->length;
4513 INIT_LIST_HEAD(&rec->chunk_list);
4514 INIT_LIST_HEAD(&rec->device_list);
4520 process_device_extent_item(struct device_extent_tree *dev_extent_cache,
4521 struct btrfs_key *key, struct extent_buffer *eb,
4524 struct device_extent_record *rec;
4527 rec = btrfs_new_device_extent_record(eb, key, slot);
4528 ret = insert_device_extent_record(dev_extent_cache, rec);
4531 "Device extent[%llu, %llu, %llu] existed.\n",
4532 rec->objectid, rec->offset, rec->length);
4539 static int process_extent_item(struct btrfs_root *root,
4540 struct cache_tree *extent_cache,
4541 struct extent_buffer *eb, int slot)
4543 struct btrfs_extent_item *ei;
4544 struct btrfs_extent_inline_ref *iref;
4545 struct btrfs_extent_data_ref *dref;
4546 struct btrfs_shared_data_ref *sref;
4547 struct btrfs_key key;
4551 u32 item_size = btrfs_item_size_nr(eb, slot);
4557 btrfs_item_key_to_cpu(eb, &key, slot);
4559 if (key.type == BTRFS_METADATA_ITEM_KEY) {
4561 num_bytes = root->leafsize;
4563 num_bytes = key.offset;
4566 if (item_size < sizeof(*ei)) {
4567 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4568 struct btrfs_extent_item_v0 *ei0;
4569 BUG_ON(item_size != sizeof(*ei0));
4570 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
4571 refs = btrfs_extent_refs_v0(eb, ei0);
4575 return add_extent_rec(extent_cache, NULL, 0, key.objectid,
4576 num_bytes, refs, 0, 0, 0, metadata, 1,
4580 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
4581 refs = btrfs_extent_refs(eb, ei);
4583 add_extent_rec(extent_cache, NULL, 0, key.objectid, num_bytes,
4584 refs, 0, 0, 0, metadata, 1, num_bytes);
4586 ptr = (unsigned long)(ei + 1);
4587 if (btrfs_extent_flags(eb, ei) & BTRFS_EXTENT_FLAG_TREE_BLOCK &&
4588 key.type == BTRFS_EXTENT_ITEM_KEY)
4589 ptr += sizeof(struct btrfs_tree_block_info);
4591 end = (unsigned long)ei + item_size;
4593 iref = (struct btrfs_extent_inline_ref *)ptr;
4594 type = btrfs_extent_inline_ref_type(eb, iref);
4595 offset = btrfs_extent_inline_ref_offset(eb, iref);
4597 case BTRFS_TREE_BLOCK_REF_KEY:
4598 add_tree_backref(extent_cache, key.objectid,
4601 case BTRFS_SHARED_BLOCK_REF_KEY:
4602 add_tree_backref(extent_cache, key.objectid,
4605 case BTRFS_EXTENT_DATA_REF_KEY:
4606 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
4607 add_data_backref(extent_cache, key.objectid, 0,
4608 btrfs_extent_data_ref_root(eb, dref),
4609 btrfs_extent_data_ref_objectid(eb,
4611 btrfs_extent_data_ref_offset(eb, dref),
4612 btrfs_extent_data_ref_count(eb, dref),
4615 case BTRFS_SHARED_DATA_REF_KEY:
4616 sref = (struct btrfs_shared_data_ref *)(iref + 1);
4617 add_data_backref(extent_cache, key.objectid, offset,
4619 btrfs_shared_data_ref_count(eb, sref),
4623 fprintf(stderr, "corrupt extent record: key %Lu %u %Lu\n",
4624 key.objectid, key.type, num_bytes);
4627 ptr += btrfs_extent_inline_ref_size(type);
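/*
 * For reference, the inline refs walked here sit directly after the
 * btrfs_extent_item (plus a btrfs_tree_block_info for tree blocks keyed as
 * EXTENT_ITEM): each starts with a type byte followed by a 64-bit offset
 * whose meaning depends on the type -- the owning root for TREE_BLOCK_REF,
 * the parent block for the SHARED_* variants -- while EXTENT_DATA_REF embeds
 * root/objectid/offset and a count inline.
 */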
4634 static int check_cache_range(struct btrfs_root *root,
4635 struct btrfs_block_group_cache *cache,
4636 u64 offset, u64 bytes)
4638 struct btrfs_free_space *entry;
4644 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
4645 bytenr = btrfs_sb_offset(i);
4646 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
4647 cache->key.objectid, bytenr, 0,
4648 &logical, &nr, &stripe_len);
4653 if (logical[nr] + stripe_len <= offset)
4655 if (offset + bytes <= logical[nr])
4657 if (logical[nr] == offset) {
4658 if (stripe_len >= bytes) {
4662 bytes -= stripe_len;
4663 offset += stripe_len;
4664 } else if (logical[nr] < offset) {
4665 if (logical[nr] + stripe_len >=
4670 bytes = (offset + bytes) -
4671 (logical[nr] + stripe_len);
4672 offset = logical[nr] + stripe_len;
4675			 * Could be tricky: the super may land in the
4676			 * middle of the area we're checking. First
4677			 * check the easiest case, where it reaches the end.
4679 if (logical[nr] + stripe_len >=
4681 bytes = logical[nr] - offset;
4685 /* Check the left side */
4686 ret = check_cache_range(root, cache,
4688 logical[nr] - offset);
4694 /* Now we continue with the right side */
4695 bytes = (offset + bytes) -
4696 (logical[nr] + stripe_len);
4697 offset = logical[nr] + stripe_len;
4704 entry = btrfs_find_free_space(cache->free_space_ctl, offset, bytes);
4706 fprintf(stderr, "There is no free space entry for %Lu-%Lu\n",
4707 offset, offset+bytes);
4711 if (entry->offset != offset) {
4712 fprintf(stderr, "Wanted offset %Lu, found %Lu\n", offset,
4717 if (entry->bytes != bytes) {
4718 fprintf(stderr, "Wanted bytes %Lu, found %Lu for off %Lu\n",
4719 bytes, entry->bytes, offset);
4723 unlink_free_space(cache->free_space_ctl, entry);
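/*
 * A worked example of the superblock handling above (illustrative numbers):
 * say the range being checked is [64M, 66M) and a super mirror stripe covers
 * [64M + 512K, 64M + 512K + 64K).  The stripe lands in the middle, so the
 * left part [64M, 64M + 512K) is checked recursively and the loop then
 * continues with the right part starting at 64M + 576K, since the superblock
 * area itself is never tracked as free space.
 */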
4728 static int verify_space_cache(struct btrfs_root *root,
4729 struct btrfs_block_group_cache *cache)
4731 struct btrfs_path *path;
4732 struct extent_buffer *leaf;
4733 struct btrfs_key key;
4737 path = btrfs_alloc_path();
4741 root = root->fs_info->extent_root;
4743 last = max_t(u64, cache->key.objectid, BTRFS_SUPER_INFO_OFFSET);
4745 key.objectid = last;
4747 key.type = BTRFS_EXTENT_ITEM_KEY;
4749 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4754 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4755 ret = btrfs_next_leaf(root, path);
4763 leaf = path->nodes[0];
4764 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4765 if (key.objectid >= cache->key.offset + cache->key.objectid)
4767 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
4768 key.type != BTRFS_METADATA_ITEM_KEY) {
4773 if (last == key.objectid) {
4774 if (key.type == BTRFS_EXTENT_ITEM_KEY)
4775 last = key.objectid + key.offset;
4777 last = key.objectid + root->leafsize;
4782 ret = check_cache_range(root, cache, last,
4783 key.objectid - last);
4786 if (key.type == BTRFS_EXTENT_ITEM_KEY)
4787 last = key.objectid + key.offset;
4789 last = key.objectid + root->leafsize;
4793 if (last < cache->key.objectid + cache->key.offset)
4794 ret = check_cache_range(root, cache, last,
4795 cache->key.objectid +
4796 cache->key.offset - last);
4799 btrfs_free_path(path);
4802 !RB_EMPTY_ROOT(&cache->free_space_ctl->free_space_offset)) {
4803 fprintf(stderr, "There are still entries left in the space "
4811 static int check_space_cache(struct btrfs_root *root)
4813 struct btrfs_block_group_cache *cache;
4814 u64 start = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
4818 if (btrfs_super_cache_generation(root->fs_info->super_copy) != -1ULL &&
4819 btrfs_super_generation(root->fs_info->super_copy) !=
4820 btrfs_super_cache_generation(root->fs_info->super_copy)) {
4821 printf("cache and super generation don't match, space cache "
4822 "will be invalidated\n");
4827 cache = btrfs_lookup_first_block_group(root->fs_info, start);
4831 start = cache->key.objectid + cache->key.offset;
4832 if (!cache->free_space_ctl) {
4833 if (btrfs_init_free_space_ctl(cache,
4834 root->sectorsize)) {
4839 btrfs_remove_free_space_cache(cache);
4842 ret = load_free_space_cache(root->fs_info, cache);
4846 ret = verify_space_cache(root, cache);
4848 fprintf(stderr, "cache appears valid but isnt %Lu\n",
4849 cache->key.objectid);
4854 return error ? -EINVAL : 0;
4857 static int read_extent_data(struct btrfs_root *root, char *data,
4858 u64 logical, u64 *len, int mirror)
4861 struct btrfs_multi_bio *multi = NULL;
4862 struct btrfs_fs_info *info = root->fs_info;
4863 struct btrfs_device *device;
4867 ret = btrfs_map_block(&info->mapping_tree, READ, logical, len,
4868 &multi, mirror, NULL);
4870 fprintf(stderr, "Couldn't map the block %llu\n",
4874 device = multi->stripes[0].dev;
4876 if (device->fd == 0)
4881 ret = pread64(device->fd, data, *len, multi->stripes[0].physical);
4891 static int check_extent_csums(struct btrfs_root *root, u64 bytenr,
4892 u64 num_bytes, unsigned long leaf_offset,
4893 struct extent_buffer *eb) {
4896 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
4898 unsigned long csum_offset;
4902 u64 data_checked = 0;
4908 if (num_bytes % root->sectorsize)
4911 data = malloc(num_bytes);
4915 while (offset < num_bytes) {
4918 read_len = num_bytes - offset;
4919 /* read as much data as we can in one go */
4920 ret = read_extent_data(root, data + offset,
4921 bytenr + offset, &read_len, mirror);
4925 /* verify the checksum of each sectorsize block of data */
4926 while (data_checked < read_len) {
4928 tmp = offset + data_checked;
4930 csum = btrfs_csum_data(NULL, (char *)data + tmp,
4931 csum, root->sectorsize);
4932 btrfs_csum_final(csum, (char *)&csum);
4934 csum_offset = leaf_offset +
4935 tmp / root->sectorsize * csum_size;
4936 read_extent_buffer(eb, (char *)&csum_expected,
4937 csum_offset, csum_size);
4938 /* try another mirror */
4939 if (csum != csum_expected) {
4940 fprintf(stderr, "mirror %d bytenr %llu csum %u expected csum %u\n",
4941 mirror, bytenr + tmp,
4942 csum, csum_expected);
4943 num_copies = btrfs_num_copies(
4944 &root->fs_info->mapping_tree,
4946 if (mirror < num_copies - 1) {
4951 data_checked += root->sectorsize;
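/*
 * Make sure the whole byte range [bytenr, bytenr + num_bytes) is covered by
 * extent items in the extent tree.  The range is trimmed as covering extents
 * are found; if an extent splits the range in two, the right half is checked
 * recursively.  Any bytes left uncovered are reported.
 */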
4960 static int check_extent_exists(struct btrfs_root *root, u64 bytenr,
4963 struct btrfs_path *path;
4964 struct extent_buffer *leaf;
4965 struct btrfs_key key;
4968 path = btrfs_alloc_path();
4970 fprintf(stderr, "Error allocating path\n");
4974 key.objectid = bytenr;
4975 key.type = BTRFS_EXTENT_ITEM_KEY;
4976 key.offset = (u64)-1;
4979 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
4982 fprintf(stderr, "Error looking up extent record %d\n", ret);
4983 btrfs_free_path(path);
4986 if (path->slots[0] > 0) {
4989 ret = btrfs_prev_leaf(root, path);
4992 } else if (ret > 0) {
4999 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5002 * Block group items come before extent items if they have the same
5003 * bytenr, so walk back one more just in case. Dear future traveler,
5004 * first congrats on mastering time travel. Now if it's not too much
5005 * trouble could you go back to 2006 and tell Chris to make the
5006 * BLOCK_GROUP_ITEM_KEY (and BTRFS_*_REF_KEY) lower than the
5007 * EXTENT_ITEM_KEY please?
5009 while (key.type > BTRFS_EXTENT_ITEM_KEY) {
5010 if (path->slots[0] > 0) {
5013 ret = btrfs_prev_leaf(root, path);
5016 } else if (ret > 0) {
5021 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5025 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5026 ret = btrfs_next_leaf(root, path);
5028 fprintf(stderr, "Error going to next leaf "
5030 btrfs_free_path(path);
5036 leaf = path->nodes[0];
5037 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5038 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
5042 if (key.objectid + key.offset < bytenr) {
5046 if (key.objectid > bytenr + num_bytes)
5049 if (key.objectid == bytenr) {
5050 if (key.offset >= num_bytes) {
5054 num_bytes -= key.offset;
5055 bytenr += key.offset;
5056 } else if (key.objectid < bytenr) {
5057 if (key.objectid + key.offset >= bytenr + num_bytes) {
5061 num_bytes = (bytenr + num_bytes) -
5062 (key.objectid + key.offset);
5063 bytenr = key.objectid + key.offset;
5065 if (key.objectid + key.offset < bytenr + num_bytes) {
5066 u64 new_start = key.objectid + key.offset;
5067 u64 new_bytes = bytenr + num_bytes - new_start;
5070 * Weird case, the extent is in the middle of
5071 * our range, we'll have to search one side
5072 * and then the other. Not sure if this happens
5073 * in real life, but no harm in coding it up
5074 * anyway just in case.
5076 btrfs_release_path(path);
5077 ret = check_extent_exists(root, new_start,
5080 fprintf(stderr, "Right section didn't "
5084 num_bytes = key.objectid - bytenr;
5087 num_bytes = key.objectid - bytenr;
5094 if (num_bytes && !ret) {
5095 fprintf(stderr, "There are no extents for csum range "
5096 "%Lu-%Lu\n", bytenr, bytenr+num_bytes);
5100 btrfs_free_path(path);
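/*
 * Walk the csum tree: optionally verify the data checksums themselves
 * (check_data_csum) and make sure every contiguous run of csum items is
 * backed by extent items in the extent tree.
 */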
5104 static int check_csums(struct btrfs_root *root)
5106 struct btrfs_path *path;
5107 struct extent_buffer *leaf;
5108 struct btrfs_key key;
5109 u64 offset = 0, num_bytes = 0;
5110 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
5114 unsigned long leaf_offset;
5116 root = root->fs_info->csum_root;
5117 if (!extent_buffer_uptodate(root->node)) {
5118 fprintf(stderr, "No valid csum tree found\n");
5122 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
5123 key.type = BTRFS_EXTENT_CSUM_KEY;
5126 path = btrfs_alloc_path();
5130 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5132 fprintf(stderr, "Error searching csum tree %d\n", ret);
5133 btrfs_free_path(path);
5137 if (ret > 0 && path->slots[0])
5142 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5143 ret = btrfs_next_leaf(root, path);
5145 fprintf(stderr, "Error going to next leaf "
5152 leaf = path->nodes[0];
5154 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5155 if (key.type != BTRFS_EXTENT_CSUM_KEY) {
5160 data_len = (btrfs_item_size_nr(leaf, path->slots[0]) /
5161 csum_size) * root->sectorsize;
5162 if (!check_data_csum)
5163 goto skip_csum_check;
5164 leaf_offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
5165 ret = check_extent_csums(root, key.offset, data_len,
5171 offset = key.offset;
5172 } else if (key.offset != offset + num_bytes) {
5173 ret = check_extent_exists(root, offset, num_bytes);
5175 fprintf(stderr, "Csum exists for %Lu-%Lu but "
5176 "there is no extent record\n",
5177 offset, offset+num_bytes);
5180 offset = key.offset;
5183 num_bytes += data_len;
5187 btrfs_free_path(path);
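/*
 * Return 1 if the key sorts before drop_key, i.e. it falls in the part of a
 * half-deleted snapshot that has already been dropped (per drop_progress).
 */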
5191 static int is_dropped_key(struct btrfs_key *key,
5192 struct btrfs_key *drop_key) {
5193 if (key->objectid < drop_key->objectid)
5195 else if (key->objectid == drop_key->objectid) {
5196 if (key->type < drop_key->type)
5198 else if (key->type == drop_key->type) {
5199 if (key->offset < drop_key->offset)
5206 static int calc_extent_flag(struct btrfs_root *root,
5207 struct cache_tree *extent_cache,
5208 struct extent_buffer *buf,
5209 struct root_item_record *ri,
5213 int nritems = btrfs_header_nritems(buf);
5214 struct btrfs_key key;
5215 struct extent_record *rec;
5216 struct cache_extent *cache;
5217 struct data_backref *dback;
5218 struct tree_backref *tback;
5219 struct extent_buffer *new_buf;
5229 * Except for file/reloc trees, we cannot have full backref mode
5232 if (ri->objectid < BTRFS_FIRST_FREE_OBJECTID)
5237 if (buf->start == ri->bytenr)
5239 if (btrfs_is_leaf(buf)) {
5241 * we are searching from the original root; world
5242 * peace is achieved, so we use a normal backref.
5244 owner = btrfs_header_owner(buf);
5245 if (owner == ri->objectid)
5248 * we check every eb here; if any eb doesn't have the
5249 * original root referring to it, we set the full backref
5250 * flag for this extent, otherwise we use a normal
5251 * backref.
5253 for (i = 0; i < nritems; i++) {
5254 struct btrfs_file_extent_item *fi;
5255 btrfs_item_key_to_cpu(buf, &key, i);
5257 if (key.type != BTRFS_EXTENT_DATA_KEY)
5259 fi = btrfs_item_ptr(buf, i,
5260 struct btrfs_file_extent_item);
5261 if (btrfs_file_extent_type(buf, fi) ==
5262 BTRFS_FILE_EXTENT_INLINE)
5264 if (btrfs_file_extent_disk_bytenr(buf, fi) == 0)
5266 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
5267 cache = lookup_cache_extent(extent_cache, bytenr, 1);
5270 offset = btrfs_file_extent_offset(buf, fi);
5271 rec = container_of(cache, struct extent_record, cache);
5272 dback = find_data_backref(rec, 0, ri->objectid, owner,
5273 key.offset - offset, 1, bytenr, bytenr);
5279 level = btrfs_header_level(buf);
5280 for (i = 0; i < nritems; i++) {
5281 ptr = btrfs_node_blockptr(buf, i);
5282 size = btrfs_level_size(root, level);
5284 new_buf = read_tree_block(root, ptr, size, 0);
5285 if (!extent_buffer_uptodate(new_buf)) {
5286 free_extent_buffer(new_buf);
5291 * we are searching from the original root; world
5292 * peace is achieved, so we use a normal backref.
5294 owner = btrfs_header_owner(new_buf);
5295 free_extent_buffer(new_buf);
5296 if (owner == ri->objectid)
5299 cache = lookup_cache_extent(extent_cache, ptr, size);
5302 rec = container_of(cache, struct extent_record, cache);
5303 tback = find_tree_backref(rec, 0, owner);
5311 cache = lookup_cache_extent(extent_cache, buf->start, 1);
5312 /* we have added this extent before */
5314 rec = container_of(cache, struct extent_record, cache);
5315 rec->flag_block_full_backref = 0;
5318 *flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5319 cache = lookup_cache_extent(extent_cache, buf->start, 1);
5320 /* we have added this extent before */
5322 rec = container_of(cache, struct extent_record, cache);
5323 rec->flag_block_full_backref = 1;
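/*
 * Process the next pending tree block: read it, work out its backref flags,
 * sanity-check it, record every item of interest (extent items, csums,
 * chunks, devices, block groups, dev extents and backrefs) in the various
 * caches, and queue any child blocks for a later pass.
 */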
5327 static int run_next_block(struct btrfs_trans_handle *trans,
5328 struct btrfs_root *root,
5329 struct block_info *bits,
5332 struct cache_tree *pending,
5333 struct cache_tree *seen,
5334 struct cache_tree *reada,
5335 struct cache_tree *nodes,
5336 struct cache_tree *extent_cache,
5337 struct cache_tree *chunk_cache,
5338 struct rb_root *dev_cache,
5339 struct block_group_tree *block_group_cache,
5340 struct device_extent_tree *dev_extent_cache,
5341 struct root_item_record *ri)
5343 struct extent_buffer *buf;
5354 struct btrfs_key key;
5355 struct cache_extent *cache;
5358 nritems = pick_next_pending(pending, reada, nodes, *last, bits,
5359 bits_nr, &reada_bits);
5364 for(i = 0; i < nritems; i++) {
5365 ret = add_cache_extent(reada, bits[i].start,
5370 /* fixme, get the parent transid */
5371 readahead_tree_block(root, bits[i].start,
5375 *last = bits[0].start;
5376 bytenr = bits[0].start;
5377 size = bits[0].size;
5379 cache = lookup_cache_extent(pending, bytenr, size);
5381 remove_cache_extent(pending, cache);
5384 cache = lookup_cache_extent(reada, bytenr, size);
5386 remove_cache_extent(reada, cache);
5389 cache = lookup_cache_extent(nodes, bytenr, size);
5391 remove_cache_extent(nodes, cache);
5394 cache = lookup_cache_extent(extent_cache, bytenr, size);
5396 struct extent_record *rec;
5398 rec = container_of(cache, struct extent_record, cache);
5399 gen = rec->parent_generation;
5402 /* fixme, get the real parent transid */
5403 buf = read_tree_block(root, bytenr, size, gen);
5404 if (!extent_buffer_uptodate(buf)) {
5405 record_bad_block_io(root->fs_info,
5406 extent_cache, bytenr, size);
5410 nritems = btrfs_header_nritems(buf);
5413 * FIXME: this works only if we don't have any full
5416 if (!init_extent_tree) {
5417 ret = btrfs_lookup_extent_info(NULL, root, bytenr,
5418 btrfs_header_level(buf), 1, NULL,
5424 ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
5429 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5434 owner = btrfs_header_owner(buf);
5437 ret = check_block(trans, root, extent_cache, buf, flags);
5441 if (btrfs_is_leaf(buf)) {
5442 btree_space_waste += btrfs_leaf_free_space(root, buf);
5443 for (i = 0; i < nritems; i++) {
5444 struct btrfs_file_extent_item *fi;
5445 btrfs_item_key_to_cpu(buf, &key, i);
5446 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
5447 process_extent_item(root, extent_cache, buf,
5451 if (key.type == BTRFS_METADATA_ITEM_KEY) {
5452 process_extent_item(root, extent_cache, buf,
5456 if (key.type == BTRFS_EXTENT_CSUM_KEY) {
5458 btrfs_item_size_nr(buf, i);
5461 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5462 process_chunk_item(chunk_cache, &key, buf, i);
5465 if (key.type == BTRFS_DEV_ITEM_KEY) {
5466 process_device_item(dev_cache, &key, buf, i);
5469 if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5470 process_block_group_item(block_group_cache,
5474 if (key.type == BTRFS_DEV_EXTENT_KEY) {
5475 process_device_extent_item(dev_extent_cache,
5480 if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
5481 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5482 process_extent_ref_v0(extent_cache, buf, i);
5489 if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
5490 add_tree_backref(extent_cache, key.objectid, 0,
5494 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
5495 add_tree_backref(extent_cache, key.objectid,
5499 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
5500 struct btrfs_extent_data_ref *ref;
5501 ref = btrfs_item_ptr(buf, i,
5502 struct btrfs_extent_data_ref);
5503 add_data_backref(extent_cache,
5505 btrfs_extent_data_ref_root(buf, ref),
5506 btrfs_extent_data_ref_objectid(buf,
5508 btrfs_extent_data_ref_offset(buf, ref),
5509 btrfs_extent_data_ref_count(buf, ref),
5510 0, root->sectorsize);
5513 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
5514 struct btrfs_shared_data_ref *ref;
5515 ref = btrfs_item_ptr(buf, i,
5516 struct btrfs_shared_data_ref);
5517 add_data_backref(extent_cache,
5518 key.objectid, key.offset, 0, 0, 0,
5519 btrfs_shared_data_ref_count(buf, ref),
5520 0, root->sectorsize);
5523 if (key.type == BTRFS_ORPHAN_ITEM_KEY) {
5524 struct bad_item *bad;
5526 if (key.objectid == BTRFS_ORPHAN_OBJECTID)
5530 bad = malloc(sizeof(struct bad_item));
5533 INIT_LIST_HEAD(&bad->list);
5534 memcpy(&bad->key, &key,
5535 sizeof(struct btrfs_key));
5536 bad->root_id = owner;
5537 list_add_tail(&bad->list, &delete_items);
5540 if (key.type != BTRFS_EXTENT_DATA_KEY)
5542 fi = btrfs_item_ptr(buf, i,
5543 struct btrfs_file_extent_item);
5544 if (btrfs_file_extent_type(buf, fi) ==
5545 BTRFS_FILE_EXTENT_INLINE)
5547 if (btrfs_file_extent_disk_bytenr(buf, fi) == 0)
5550 data_bytes_allocated +=
5551 btrfs_file_extent_disk_num_bytes(buf, fi);
5552 if (data_bytes_allocated < root->sectorsize) {
5555 data_bytes_referenced +=
5556 btrfs_file_extent_num_bytes(buf, fi);
5557 add_data_backref(extent_cache,
5558 btrfs_file_extent_disk_bytenr(buf, fi),
5559 parent, owner, key.objectid, key.offset -
5560 btrfs_file_extent_offset(buf, fi), 1, 1,
5561 btrfs_file_extent_disk_num_bytes(buf, fi));
5565 struct btrfs_key first_key;
5567 first_key.objectid = 0;
5570 btrfs_item_key_to_cpu(buf, &first_key, 0);
5571 level = btrfs_header_level(buf);
5572 for (i = 0; i < nritems; i++) {
5573 ptr = btrfs_node_blockptr(buf, i);
5574 size = btrfs_level_size(root, level - 1);
5575 btrfs_node_key_to_cpu(buf, &key, i);
5577 if ((level == ri->drop_level)
5578 && is_dropped_key(&key, &ri->drop_key)) {
5582 ret = add_extent_rec(extent_cache, &key,
5583 btrfs_node_ptr_generation(buf, i),
5584 ptr, size, 0, 0, 1, 0, 1, 0,
5588 add_tree_backref(extent_cache, ptr, parent, owner, 1);
5591 add_pending(nodes, seen, ptr, size);
5593 add_pending(pending, seen, ptr, size);
5596 btree_space_waste += (BTRFS_NODEPTRS_PER_BLOCK(root) -
5597 nritems) * sizeof(struct btrfs_key_ptr);
5599 total_btree_bytes += buf->len;
5600 if (fs_root_objectid(btrfs_header_owner(buf)))
5601 total_fs_tree_bytes += buf->len;
5602 if (btrfs_header_owner(buf) == BTRFS_EXTENT_TREE_OBJECTID)
5603 total_extent_tree_bytes += buf->len;
5604 if (!found_old_backref &&
5605 btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID &&
5606 btrfs_header_backref_rev(buf) == BTRFS_MIXED_BACKREF_REV &&
5607 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
5608 found_old_backref = 1;
5610 free_extent_buffer(buf);
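/*
 * Queue a root's tree block for scanning and record an extent plus a tree
 * backref for it.  Reloc trees and pre-mixed-backref blocks get a full
 * backref keyed on the block itself; everything else is keyed on the owning
 * root's objectid.
 */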
5614 static int add_root_to_pending(struct extent_buffer *buf,
5615 struct cache_tree *extent_cache,
5616 struct cache_tree *pending,
5617 struct cache_tree *seen,
5618 struct cache_tree *nodes,
5621 if (btrfs_header_level(buf) > 0)
5622 add_pending(nodes, seen, buf->start, buf->len);
5624 add_pending(pending, seen, buf->start, buf->len);
5625 add_extent_rec(extent_cache, NULL, 0, buf->start, buf->len,
5626 0, 1, 1, 0, 1, 0, buf->len);
5628 if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
5629 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
5630 add_tree_backref(extent_cache, buf->start, buf->start,
5633 add_tree_backref(extent_cache, buf->start, 0, objectid, 1);
5637 /* as we fix the tree, we might be deleting blocks that
5638 * we're tracking for repair. This hook makes sure we
5639 * remove any backrefs for blocks as we are fixing them.
5641 static int free_extent_hook(struct btrfs_trans_handle *trans,
5642 struct btrfs_root *root,
5643 u64 bytenr, u64 num_bytes, u64 parent,
5644 u64 root_objectid, u64 owner, u64 offset,
5647 struct extent_record *rec;
5648 struct cache_extent *cache;
5650 struct cache_tree *extent_cache = root->fs_info->fsck_extent_cache;
5652 is_data = owner >= BTRFS_FIRST_FREE_OBJECTID;
5653 cache = lookup_cache_extent(extent_cache, bytenr, num_bytes);
5657 rec = container_of(cache, struct extent_record, cache);
5659 struct data_backref *back;
5660 back = find_data_backref(rec, parent, root_objectid, owner,
5661 offset, 1, bytenr, num_bytes);
5664 if (back->node.found_ref) {
5665 back->found_ref -= refs_to_drop;
5667 rec->refs -= refs_to_drop;
5669 if (back->node.found_extent_tree) {
5670 back->num_refs -= refs_to_drop;
5671 if (rec->extent_item_refs)
5672 rec->extent_item_refs -= refs_to_drop;
5674 if (back->found_ref == 0)
5675 back->node.found_ref = 0;
5676 if (back->num_refs == 0)
5677 back->node.found_extent_tree = 0;
5679 if (!back->node.found_extent_tree && back->node.found_ref) {
5680 list_del(&back->node.list);
5684 struct tree_backref *back;
5685 back = find_tree_backref(rec, parent, root_objectid);
5688 if (back->node.found_ref) {
5691 back->node.found_ref = 0;
5693 if (back->node.found_extent_tree) {
5694 if (rec->extent_item_refs)
5695 rec->extent_item_refs--;
5696 back->node.found_extent_tree = 0;
5698 if (!back->node.found_extent_tree && back->node.found_ref) {
5699 list_del(&back->node.list);
5703 maybe_free_extent_rec(extent_cache, rec);
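/*
 * Delete every extent tree item recorded for this bytenr (extent/metadata
 * items and all of the backref item types), updating the block group
 * accounting when an EXTENT_ITEM or METADATA_ITEM is removed.
 */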
5708 static int delete_extent_records(struct btrfs_trans_handle *trans,
5709 struct btrfs_root *root,
5710 struct btrfs_path *path,
5711 u64 bytenr, u64 new_len)
5713 struct btrfs_key key;
5714 struct btrfs_key found_key;
5715 struct extent_buffer *leaf;
5720 key.objectid = bytenr;
5722 key.offset = (u64)-1;
5725 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
5732 if (path->slots[0] == 0)
5738 leaf = path->nodes[0];
5739 slot = path->slots[0];
5741 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5742 if (found_key.objectid != bytenr)
5745 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
5746 found_key.type != BTRFS_METADATA_ITEM_KEY &&
5747 found_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
5748 found_key.type != BTRFS_EXTENT_DATA_REF_KEY &&
5749 found_key.type != BTRFS_EXTENT_REF_V0_KEY &&
5750 found_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
5751 found_key.type != BTRFS_SHARED_DATA_REF_KEY) {
5752 btrfs_release_path(path);
5753 if (found_key.type == 0) {
5754 if (found_key.offset == 0)
5756 key.offset = found_key.offset - 1;
5757 key.type = found_key.type;
5759 key.type = found_key.type - 1;
5760 key.offset = (u64)-1;
5764 fprintf(stderr, "repair deleting extent record: key %Lu %u %Lu\n",
5765 found_key.objectid, found_key.type, found_key.offset);
5767 ret = btrfs_del_item(trans, root->fs_info->extent_root, path);
5770 btrfs_release_path(path);
5772 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5773 found_key.type == BTRFS_METADATA_ITEM_KEY) {
5774 u64 bytes = (found_key.type == BTRFS_EXTENT_ITEM_KEY) ?
5775 found_key.offset : root->leafsize;
5777 ret = btrfs_update_block_group(trans, root, bytenr,
5784 btrfs_release_path(path);
5789 * for a single backref, this will allocate a new extent
5790 * and add the backref to it.
5792 static int record_extent(struct btrfs_trans_handle *trans,
5793 struct btrfs_fs_info *info,
5794 struct btrfs_path *path,
5795 struct extent_record *rec,
5796 struct extent_backref *back,
5797 int allocated, u64 flags)
5800 struct btrfs_root *extent_root = info->extent_root;
5801 struct extent_buffer *leaf;
5802 struct btrfs_key ins_key;
5803 struct btrfs_extent_item *ei;
5804 struct tree_backref *tback;
5805 struct data_backref *dback;
5806 struct btrfs_tree_block_info *bi;
5809 rec->max_size = max_t(u64, rec->max_size,
5810 info->extent_root->leafsize);
5813 u32 item_size = sizeof(*ei);
5816 item_size += sizeof(*bi);
5818 ins_key.objectid = rec->start;
5819 ins_key.offset = rec->max_size;
5820 ins_key.type = BTRFS_EXTENT_ITEM_KEY;
5822 ret = btrfs_insert_empty_item(trans, extent_root, path,
5823 &ins_key, item_size);
5827 leaf = path->nodes[0];
5828 ei = btrfs_item_ptr(leaf, path->slots[0],
5829 struct btrfs_extent_item);
5831 btrfs_set_extent_refs(leaf, ei, 0);
5832 btrfs_set_extent_generation(leaf, ei, rec->generation);
5834 if (back->is_data) {
5835 btrfs_set_extent_flags(leaf, ei,
5836 BTRFS_EXTENT_FLAG_DATA);
5838 struct btrfs_disk_key copy_key;
5840 tback = (struct tree_backref *)back;
5841 bi = (struct btrfs_tree_block_info *)(ei + 1);
5842 memset_extent_buffer(leaf, 0, (unsigned long)bi,
5845 btrfs_set_disk_key_objectid(&copy_key,
5846 rec->info_objectid);
5847 btrfs_set_disk_key_type(&copy_key, 0);
5848 btrfs_set_disk_key_offset(&copy_key, 0);
5850 btrfs_set_tree_block_level(leaf, bi, rec->info_level);
5851 btrfs_set_tree_block_key(leaf, bi, &copy_key);
5853 btrfs_set_extent_flags(leaf, ei,
5854 BTRFS_EXTENT_FLAG_TREE_BLOCK | flags);
5857 btrfs_mark_buffer_dirty(leaf);
5858 ret = btrfs_update_block_group(trans, extent_root, rec->start,
5859 rec->max_size, 1, 0);
5862 btrfs_release_path(path);
5865 if (back->is_data) {
5869 dback = (struct data_backref *)back;
5870 if (back->full_backref)
5871 parent = dback->parent;
5875 for (i = 0; i < dback->found_ref; i++) {
5876 /* if parent != 0, we're doing a full backref
5877 * passing BTRFS_FIRST_FREE_OBJECTID as the owner
5878 * just makes the backref allocator create a data
5881 ret = btrfs_inc_extent_ref(trans, info->extent_root,
5882 rec->start, rec->max_size,
5886 BTRFS_FIRST_FREE_OBJECTID :
5892 fprintf(stderr, "adding new data backref"
5893 " on %llu %s %llu owner %llu"
5894 " offset %llu found %d\n",
5895 (unsigned long long)rec->start,
5896 back->full_backref ?
5898 back->full_backref ?
5899 (unsigned long long)parent :
5900 (unsigned long long)dback->root,
5901 (unsigned long long)dback->owner,
5902 (unsigned long long)dback->offset,
5907 tback = (struct tree_backref *)back;
5908 if (back->full_backref)
5909 parent = tback->parent;
5913 ret = btrfs_inc_extent_ref(trans, info->extent_root,
5914 rec->start, rec->max_size,
5915 parent, tback->root, 0, 0);
5916 fprintf(stderr, "adding new tree backref on "
5917 "start %llu len %llu parent %llu root %llu\n",
5918 rec->start, rec->max_size, tback->parent, tback->root);
5923 btrfs_release_path(path);
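/*
 * One candidate (bytenr, bytes) pair for an extent, plus how many data
 * backrefs voted for it.  Used by verify_backrefs() to let the backrefs
 * agree on where the extent really lives.
 */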
5927 struct extent_entry {
5932 struct list_head list;
5935 static struct extent_entry *find_entry(struct list_head *entries,
5936 u64 bytenr, u64 bytes)
5938 struct extent_entry *entry = NULL;
5940 list_for_each_entry(entry, entries, list) {
5941 if (entry->bytenr == bytenr && entry->bytes == bytes)
5948 static struct extent_entry *find_most_right_entry(struct list_head *entries)
5950 struct extent_entry *entry, *best = NULL, *prev = NULL;
5952 list_for_each_entry(entry, entries, list) {
5959 * If there are as many broken entries as entries then we know
5960 * not to trust this particular entry.
5962 if (entry->broken == entry->count)
5966 * If our current entry == best then we can't be sure our best
5967 * is really the best, so we need to keep searching.
5969 if (best && best->count == entry->count) {
5975 /* Prev == entry, not good enough, have to keep searching */
5976 if (!prev->broken && prev->count == entry->count)
5980 best = (prev->count > entry->count) ? prev : entry;
5981 else if (best->count < entry->count)
5989 static int repair_ref(struct btrfs_trans_handle *trans,
5990 struct btrfs_fs_info *info, struct btrfs_path *path,
5991 struct data_backref *dback, struct extent_entry *entry)
5993 struct btrfs_root *root;
5994 struct btrfs_file_extent_item *fi;
5995 struct extent_buffer *leaf;
5996 struct btrfs_key key;
6000 key.objectid = dback->root;
6001 key.type = BTRFS_ROOT_ITEM_KEY;
6002 key.offset = (u64)-1;
6003 root = btrfs_read_fs_root(info, &key);
6005 fprintf(stderr, "Couldn't find root for our ref\n");
6010 * The backref points to the original offset of the extent if it was
6011 * split, so we need to search down to the offset we have and then walk
6012 * forward until we find the backref we're looking for.
6014 key.objectid = dback->owner;
6015 key.type = BTRFS_EXTENT_DATA_KEY;
6016 key.offset = dback->offset;
6017 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6019 fprintf(stderr, "Error looking up ref %d\n", ret);
6024 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
6025 ret = btrfs_next_leaf(root, path);
6027 fprintf(stderr, "Couldn't find our ref, next\n");
6031 leaf = path->nodes[0];
6032 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6033 if (key.objectid != dback->owner ||
6034 key.type != BTRFS_EXTENT_DATA_KEY) {
6035 fprintf(stderr, "Couldn't find our ref, search\n");
6038 fi = btrfs_item_ptr(leaf, path->slots[0],
6039 struct btrfs_file_extent_item);
6040 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6041 bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6043 if (bytenr == dback->disk_bytenr && bytes == dback->bytes)
6048 btrfs_release_path(path);
6051 * Have to make sure that this root gets updated when we commit the
6054 record_root_in_trans(trans, root);
6057 * Ok we have the key of the file extent we want to fix, now we can cow
6058 * down to the thing and fix it.
6060 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6062 fprintf(stderr, "Error cowing down to ref [%Lu, %u, %Lu]: %d\n",
6063 key.objectid, key.type, key.offset, ret);
6067 fprintf(stderr, "Well that's odd, we just found this key "
6068 "[%Lu, %u, %Lu]\n", key.objectid, key.type,
6072 leaf = path->nodes[0];
6073 fi = btrfs_item_ptr(leaf, path->slots[0],
6074 struct btrfs_file_extent_item);
6076 if (btrfs_file_extent_compression(leaf, fi) &&
6077 dback->disk_bytenr != entry->bytenr) {
6078 fprintf(stderr, "Ref doesn't match the record start and is "
6079 "compressed, please take a btrfs-image of this file "
6080 "system and send it to a btrfs developer so they can "
6081 "complete this functionality for bytenr %Lu\n",
6082 dback->disk_bytenr);
6086 if (dback->node.broken && dback->disk_bytenr != entry->bytenr) {
6087 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6088 } else if (dback->disk_bytenr > entry->bytenr) {
6089 u64 off_diff, offset;
6091 off_diff = dback->disk_bytenr - entry->bytenr;
6092 offset = btrfs_file_extent_offset(leaf, fi);
6093 if (dback->disk_bytenr + offset +
6094 btrfs_file_extent_num_bytes(leaf, fi) >
6095 entry->bytenr + entry->bytes) {
6096 fprintf(stderr, "Ref is past the entry end, please "
6097 "take a btrfs-image of this file system and "
6098 "send it to a btrfs developer, ref %Lu\n",
6099 dback->disk_bytenr);
6103 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6104 btrfs_set_file_extent_offset(leaf, fi, offset);
6105 } else if (dback->disk_bytenr < entry->bytenr) {
6108 offset = btrfs_file_extent_offset(leaf, fi);
6109 if (dback->disk_bytenr + offset < entry->bytenr) {
6110 fprintf(stderr, "Ref is before the entry start, please"
6111 " take a btrfs-image of this file system and "
6112 "send it to a btrfs developer, ref %Lu\n",
6113 dback->disk_bytenr);
6117 offset += dback->disk_bytenr;
6118 offset -= entry->bytenr;
6119 btrfs_set_file_extent_disk_bytenr(leaf, fi, entry->bytenr);
6120 btrfs_set_file_extent_offset(leaf, fi, offset);
6123 btrfs_set_file_extent_disk_num_bytes(leaf, fi, entry->bytes);
6126 * Chances are if disk_num_bytes were wrong then so is ram_bytes, but
6127 * only do this if we aren't using compression, otherwise it's a
6130 if (!btrfs_file_extent_compression(leaf, fi))
6131 btrfs_set_file_extent_ram_bytes(leaf, fi, entry->bytes);
6133 printf("ram bytes may be wrong?\n");
6134 btrfs_mark_buffer_dirty(leaf);
6135 btrfs_release_path(path);
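/*
 * Make the data backrefs for this extent record agree on the extent's
 * bytenr and size: collect the (disk_bytenr, bytes) pair each backref
 * reports, let them vote, fall back to the extent record itself on a tie,
 * and rewrite any file extent items that disagree via repair_ref().
 */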
6139 static int verify_backrefs(struct btrfs_trans_handle *trans,
6140 struct btrfs_fs_info *info, struct btrfs_path *path,
6141 struct extent_record *rec)
6143 struct extent_backref *back;
6144 struct data_backref *dback;
6145 struct extent_entry *entry, *best = NULL;
6148 int broken_entries = 0;
6153 * Metadata is easy and the backrefs should always agree on bytenr and
6154 * size; if not, we've got bigger issues.
6159 list_for_each_entry(back, &rec->backrefs, list) {
6160 if (back->full_backref || !back->is_data)
6163 dback = (struct data_backref *)back;
6166 * We only pay attention to backrefs that we found a real
6169 if (dback->found_ref == 0)
6173 * For now we only catch when the bytes don't match, not the
6174 * bytenr. We can easily do this at the same time, but I want
6175 * to have a fs image to test on before we just add repair
6176 * functionality willy-nilly so we know we won't screw up the
6180 entry = find_entry(&entries, dback->disk_bytenr,
6183 entry = malloc(sizeof(struct extent_entry));
6188 memset(entry, 0, sizeof(*entry));
6189 entry->bytenr = dback->disk_bytenr;
6190 entry->bytes = dback->bytes;
6191 list_add_tail(&entry->list, &entries);
6196 * If we only have one entry we may think the entries agree when
6197 * in reality they don't, so we have to do some extra checking.
6199 if (dback->disk_bytenr != rec->start ||
6200 dback->bytes != rec->nr || back->broken)
6211 /* Yay all the backrefs agree, carry on good sir */
6212 if (nr_entries <= 1 && !mismatch)
6215 fprintf(stderr, "attempting to repair backref discrepancy for bytenr "
6216 "%Lu\n", rec->start);
6219 * First we want to see if the backrefs can agree amongst themselves who
6220 * is right, so figure out which one of the entries has the highest
6223 best = find_most_right_entry(&entries);
6226 * Ok so we may have an even split between what the backrefs think, so
6227 * this is where we use the extent ref to see what it thinks.
6230 entry = find_entry(&entries, rec->start, rec->nr);
6231 if (!entry && (!broken_entries || !rec->found_rec)) {
6232 fprintf(stderr, "Backrefs don't agree with each other "
6233 "and extent record doesn't agree with anybody,"
6234 " so we can't fix bytenr %Lu bytes %Lu\n",
6235 rec->start, rec->nr);
6238 } else if (!entry) {
6240 * Ok our backrefs were broken, we'll assume this is the
6241 * correct value and add an entry for this range.
6243 entry = malloc(sizeof(struct extent_entry));
6248 memset(entry, 0, sizeof(*entry));
6249 entry->bytenr = rec->start;
6250 entry->bytes = rec->nr;
6251 list_add_tail(&entry->list, &entries);
6255 best = find_most_right_entry(&entries);
6257 fprintf(stderr, "Backrefs and extent record evenly "
6258 "split on who is right, this is going to "
6259 "require user input to fix bytenr %Lu bytes "
6260 "%Lu\n", rec->start, rec->nr);
6267 * I don't think this can happen currently as we'll abort() if we catch
6268 * this case higher up, but in case somebody removes that we still can't
6269 * deal with it properly here yet, so just bail out if that's the case.
6271 if (best->bytenr != rec->start) {
6272 fprintf(stderr, "Extent start and backref starts don't match, "
6273 "please use btrfs-image on this file system and send "
6274 "it to a btrfs developer so they can make fsck fix "
6275 "this particular case. bytenr is %Lu, bytes is %Lu\n",
6276 rec->start, rec->nr);
6282 * Ok great we all agreed on an extent record, let's go find the real
6283 * references and fix up the ones that don't match.
6285 list_for_each_entry(back, &rec->backrefs, list) {
6286 if (back->full_backref || !back->is_data)
6289 dback = (struct data_backref *)back;
6292 * Still ignoring backrefs that don't have a real ref attached
6295 if (dback->found_ref == 0)
6298 if (dback->bytes == best->bytes &&
6299 dback->disk_bytenr == best->bytenr)
6302 ret = repair_ref(trans, info, path, dback, best);
6308 * Ok we messed with the actual refs, which means we need to drop our
6309 * entire cache and go back and rescan. I know this is a huge pain and
6310 * adds a lot of extra work, but it's the only way to be safe. Once all
6311 * the backrefs agree we may not need to do anything to the extent
6316 while (!list_empty(&entries)) {
6317 entry = list_entry(entries.next, struct extent_entry, list);
6318 list_del_init(&entry->list);
6324 static int process_duplicates(struct btrfs_root *root,
6325 struct cache_tree *extent_cache,
6326 struct extent_record *rec)
6328 struct extent_record *good, *tmp;
6329 struct cache_extent *cache;
6333 * If we found an extent record for this extent then return, or if we
6334 * have more than one duplicate we are likely going to need to delete
6337 if (rec->found_rec || rec->num_duplicates > 1)
6340 /* Shouldn't happen but just in case */
6341 BUG_ON(!rec->num_duplicates);
6344 * So this happens if we end up with a backref that doesn't match the
6345 * actual extent entry. So either the backref is bad or the extent
6346 * entry is bad. Either way we want to have the extent_record actually
6347 * reflect what we found in the extent_tree, so we need to take the
6348 * duplicate out and use that as the extent_record since the only way we
6349 * get a duplicate is if we find a real life BTRFS_EXTENT_ITEM_KEY.
6351 remove_cache_extent(extent_cache, &rec->cache);
6353 good = list_entry(rec->dups.next, struct extent_record, list);
6354 list_del_init(&good->list);
6355 INIT_LIST_HEAD(&good->backrefs);
6356 INIT_LIST_HEAD(&good->dups);
6357 good->cache.start = good->start;
6358 good->cache.size = good->nr;
6359 good->content_checked = 0;
6360 good->owner_ref_checked = 0;
6361 good->num_duplicates = 0;
6362 good->refs = rec->refs;
6363 list_splice_init(&rec->backrefs, &good->backrefs);
6365 cache = lookup_cache_extent(extent_cache, good->start,
6369 tmp = container_of(cache, struct extent_record, cache);
6372 * If we find another overlapping extent and its found_rec is
6373 * set then it's a duplicate and we need to try and delete
6376 if (tmp->found_rec || tmp->num_duplicates > 0) {
6377 if (list_empty(&good->list))
6378 list_add_tail(&good->list,
6379 &duplicate_extents);
6380 good->num_duplicates += tmp->num_duplicates + 1;
6381 list_splice_init(&tmp->dups, &good->dups);
6382 list_del_init(&tmp->list);
6383 list_add_tail(&tmp->list, &good->dups);
6384 remove_cache_extent(extent_cache, &tmp->cache);
6389 * Ok we have another extent rec that isn't backed by an extent
6390 * item, so let's just add it to this extent and carry on like we did above.
6392 good->refs += tmp->refs;
6393 list_splice_init(&tmp->backrefs, &good->backrefs);
6394 remove_cache_extent(extent_cache, &tmp->cache);
6397 ret = insert_cache_extent(extent_cache, &good->cache);
6400 return good->num_duplicates ? 0 : 1;
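/*
 * Pick the duplicate record that covers all of the others, delete the
 * extent items for the rest from the extent tree and free them.  Returns
 * the number of items deleted, or a negative error.
 */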
6403 static int delete_duplicate_records(struct btrfs_trans_handle *trans,
6404 struct btrfs_root *root,
6405 struct extent_record *rec)
6407 LIST_HEAD(delete_list);
6408 struct btrfs_path *path;
6409 struct extent_record *tmp, *good, *n;
6412 struct btrfs_key key;
6414 path = btrfs_alloc_path();
6421 /* Find the record that covers all of the duplicates. */
6422 list_for_each_entry(tmp, &rec->dups, list) {
6423 if (good->start < tmp->start)
6425 if (good->nr > tmp->nr)
6428 if (tmp->start + tmp->nr < good->start + good->nr) {
6429 fprintf(stderr, "Ok we have overlapping extents that "
6430 "aren't completely covered by eachother, this "
6431 "is going to require more careful thought. "
6432 "The extents are [%Lu-%Lu] and [%Lu-%Lu]\n",
6433 tmp->start, tmp->nr, good->start, good->nr);
6440 list_add_tail(&rec->list, &delete_list);
6442 list_for_each_entry_safe(tmp, n, &rec->dups, list) {
6445 list_move_tail(&tmp->list, &delete_list);
6448 root = root->fs_info->extent_root;
6449 list_for_each_entry(tmp, &delete_list, list) {
6450 if (tmp->found_rec == 0)
6452 key.objectid = tmp->start;
6453 key.type = BTRFS_EXTENT_ITEM_KEY;
6454 key.offset = tmp->nr;
6456 /* Shouldn't happen but just in case */
6457 if (tmp->metadata) {
6458 fprintf(stderr, "Well this shouldn't happen, extent "
6459 "record overlaps but is metadata? "
6460 "[%Lu, %Lu]\n", tmp->start, tmp->nr);
6464 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
6470 ret = btrfs_del_item(trans, root, path);
6473 btrfs_release_path(path);
6478 while (!list_empty(&delete_list)) {
6479 tmp = list_entry(delete_list.next, struct extent_record, list);
6480 list_del_init(&tmp->list);
6486 while (!list_empty(&rec->dups)) {
6487 tmp = list_entry(rec->dups.next, struct extent_record, list);
6488 list_del_init(&tmp->list);
6492 btrfs_free_path(path);
6494 if (!ret && !nr_del)
6495 rec->num_duplicates = 0;
6497 return ret ? ret : nr_del;
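/*
 * For data backrefs that never got a real ref during the scan, look up the
 * file extent item they point at in the owning fs tree and, as long as the
 * bytenr isn't already covered by another extent record, adopt its
 * disk_bytenr/disk_num_bytes so verify_backrefs() has a vote to work with.
 */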
6500 static int find_possible_backrefs(struct btrfs_trans_handle *trans,
6501 struct btrfs_fs_info *info,
6502 struct btrfs_path *path,
6503 struct cache_tree *extent_cache,
6504 struct extent_record *rec)
6506 struct btrfs_root *root;
6507 struct extent_backref *back;
6508 struct data_backref *dback;
6509 struct cache_extent *cache;
6510 struct btrfs_file_extent_item *fi;
6511 struct btrfs_key key;
6515 list_for_each_entry(back, &rec->backrefs, list) {
6516 /* Don't care about full backrefs (poor unloved backrefs) */
6517 if (back->full_backref || !back->is_data)
6520 dback = (struct data_backref *)back;
6522 /* We found this one, we don't need to do a lookup */
6523 if (dback->found_ref)
6526 key.objectid = dback->root;
6527 key.type = BTRFS_ROOT_ITEM_KEY;
6528 key.offset = (u64)-1;
6530 root = btrfs_read_fs_root(info, &key);
6532 /* No root, definitely a bad ref, skip */
6533 if (IS_ERR(root) && PTR_ERR(root) == -ENOENT)
6535 /* Other err, exit */
6537 return PTR_ERR(root);
6539 key.objectid = dback->owner;
6540 key.type = BTRFS_EXTENT_DATA_KEY;
6541 key.offset = dback->offset;
6542 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6544 btrfs_release_path(path);
6547 /* Didn't find it, we can carry on */
6552 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6553 struct btrfs_file_extent_item);
6554 bytenr = btrfs_file_extent_disk_bytenr(path->nodes[0], fi);
6555 bytes = btrfs_file_extent_disk_num_bytes(path->nodes[0], fi);
6556 btrfs_release_path(path);
6557 cache = lookup_cache_extent(extent_cache, bytenr, 1);
6559 struct extent_record *tmp;
6560 tmp = container_of(cache, struct extent_record, cache);
6563 * If we found an extent record for the bytenr for this
6564 * particular backref then we can't add it to our
6565 * current extent record. We only want to add backrefs
6566 * that don't have a corresponding extent item in the
6567 * extent tree since they likely belong to this record
6568 * and we need to fix it if it doesn't match bytenrs.
6574 dback->found_ref += 1;
6575 dback->disk_bytenr = bytenr;
6576 dback->bytes = bytes;
6579 * Set this so the verify backref code knows not to trust the
6580 * values in this backref.
6589 * Record orphan data refs into the corresponding root.
6591 * Return 0 if the extent item contains a data ref that was recorded.
6592 * Return 1 if the extent item contains no useful data ref.
6593 * In that case it may contain only a shared_dataref or metadata backref,
6594 * or the file extent exists (this should be handled by the extent bytenr
6596 * Return <0 if something goes wrong.
6598 static int record_orphan_data_extents(struct btrfs_fs_info *fs_info,
6599 struct extent_record *rec)
6601 struct btrfs_key key;
6602 struct btrfs_root *dest_root;
6603 struct extent_backref *back;
6604 struct data_backref *dback;
6605 struct orphan_data_extent *orphan;
6606 struct btrfs_path *path;
6607 int recorded_data_ref = 0;
6612 path = btrfs_alloc_path();
6615 list_for_each_entry(back, &rec->backrefs, list) {
6616 if (back->full_backref || !back->is_data ||
6617 !back->found_extent_tree)
6619 dback = (struct data_backref *)back;
6620 if (dback->found_ref)
6622 key.objectid = dback->root;
6623 key.type = BTRFS_ROOT_ITEM_KEY;
6624 key.offset = (u64)-1;
6626 dest_root = btrfs_read_fs_root(fs_info, &key);
6628 /* For a non-existent root we just skip it */
6629 if (IS_ERR(dest_root) || !dest_root)
6632 key.objectid = dback->owner;
6633 key.type = BTRFS_EXTENT_DATA_KEY;
6634 key.offset = dback->offset;
6636 ret = btrfs_search_slot(NULL, dest_root, &key, path, 0, 0);
6638 * For ret < 0, it's OK since the fs-tree may be corrupted,
6639 * we need to record it for inode/file extent rebuild.
6640 * For ret > 0, we record it only for file extent rebuild.
6641 * For ret == 0, the file extent exists but only bytenr
6642 * mismatch, let the original bytenr fix routine to handle,
6648 orphan = malloc(sizeof(*orphan));
6653 INIT_LIST_HEAD(&orphan->list);
6654 orphan->root = dback->root;
6655 orphan->objectid = dback->owner;
6656 orphan->offset = dback->offset;
6657 orphan->disk_bytenr = rec->cache.start;
6658 orphan->disk_len = rec->cache.size;
6659 list_add(&dest_root->orphan_data_extents, &orphan->list);
6660 recorded_data_ref = 1;
6663 btrfs_free_path(path);
6665 return !recorded_data_ref;
6671 * when an incorrect extent item is found, this will delete
6672 * all of the existing entries for it and recreate them
6673 * based on what the tree scan found.
6675 static int fixup_extent_refs(struct btrfs_trans_handle *trans,
6676 struct btrfs_fs_info *info,
6677 struct cache_tree *extent_cache,
6678 struct extent_record *rec)
6681 struct btrfs_path *path;
6682 struct list_head *cur = rec->backrefs.next;
6683 struct cache_extent *cache;
6684 struct extent_backref *back;
6689 * remember our flags for recreating the extent.
6690 * FIXME: if we have cleared the extent tree, we cannot
6691 * look up extent info in the extent tree.
6693 if (!init_extent_tree) {
6694 ret = btrfs_lookup_extent_info(NULL, info->extent_root,
6695 rec->start, rec->max_size,
6696 rec->metadata, NULL, &flags);
6700 if (rec->flag_block_full_backref)
6701 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6704 path = btrfs_alloc_path();
6708 if (rec->refs != rec->extent_item_refs && !rec->metadata) {
6710 * Sometimes the backrefs themselves are so broken they don't
6711 * get attached to any meaningful rec, so first go back and
6712 * check any of our backrefs that we couldn't find and throw
6713 * them into the list if we find the backref so that
6714 * verify_backrefs can figure out what to do.
6716 ret = find_possible_backrefs(trans, info, path, extent_cache,
6722 /* step one, make sure all of the backrefs agree */
6723 ret = verify_backrefs(trans, info, path, rec);
6727 /* step two, delete all the existing records */
6728 ret = delete_extent_records(trans, info->extent_root, path,
6729 rec->start, rec->max_size);
6734 /* was this block corrupt? If so, don't add references to it */
6735 cache = lookup_cache_extent(info->corrupt_blocks,
6736 rec->start, rec->max_size);
6742 /* step three, recreate all the refs we did find */
6743 while(cur != &rec->backrefs) {
6744 back = list_entry(cur, struct extent_backref, list);
6748 * if we didn't find any references, don't create a
6751 if (!back->found_ref)
6754 ret = record_extent(trans, info, path, rec, back, allocated, flags);
6761 btrfs_free_path(path);
6765 /* right now we only prune from the extent allocation tree */
6766 static int prune_one_block(struct btrfs_trans_handle *trans,
6767 struct btrfs_fs_info *info,
6768 struct btrfs_corrupt_block *corrupt)
6771 struct btrfs_path path;
6772 struct extent_buffer *eb;
6776 int level = corrupt->level + 1;
6778 btrfs_init_path(&path);
6780 /* we want to stop at the parent to our busted block */
6781 path.lowest_level = level;
6783 ret = btrfs_search_slot(trans, info->extent_root,
6784 &corrupt->key, &path, -1, 1);
6789 eb = path.nodes[level];
6796 * hopefully the search gave us the block we want to prune,
6797 * let's try that first
6799 slot = path.slots[level];
6800 found = btrfs_node_blockptr(eb, slot);
6801 if (found == corrupt->cache.start)
6804 nritems = btrfs_header_nritems(eb);
6806 /* the search failed, let's scan this node and hope we find it */
6807 for (slot = 0; slot < nritems; slot++) {
6808 found = btrfs_node_blockptr(eb, slot);
6809 if (found == corrupt->cache.start)
6813 * we couldn't find the bad block. TODO: search all the nodes for pointers
6816 if (eb == info->extent_root->node) {
6821 btrfs_release_path(&path);
6826 printk("deleting pointer to block %Lu\n", corrupt->cache.start);
6827 ret = btrfs_del_ptr(trans, info->extent_root, &path, level, slot);
6830 btrfs_release_path(&path);
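/*
 * Walk the corrupt-block cache and drop the extent tree pointer to each bad
 * block via prune_one_block().
 */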
6834 static int prune_corrupt_blocks(struct btrfs_trans_handle *trans,
6835 struct btrfs_fs_info *info)
6837 struct cache_extent *cache;
6838 struct btrfs_corrupt_block *corrupt;
6840 cache = search_cache_extent(info->corrupt_blocks, 0);
6844 corrupt = container_of(cache, struct btrfs_corrupt_block, cache);
6845 prune_one_block(trans, info, corrupt);
6846 cache = next_cache_extent(cache);
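/*
 * Clear the in-memory free space cache and mark every block group uncached
 * so later allocations rebuild their caching state from scratch.
 */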
6851 static void reset_cached_block_groups(struct btrfs_fs_info *fs_info)
6853 struct btrfs_block_group_cache *cache;
6858 ret = find_first_extent_bit(&fs_info->free_space_cache, 0,
6859 &start, &end, EXTENT_DIRTY);
6862 clear_extent_dirty(&fs_info->free_space_cache, start, end,
6868 cache = btrfs_lookup_first_block_group(fs_info, start);
6873 start = cache->key.objectid + cache->key.offset;
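/*
 * Main pass over the collected extent records: when repairing, pin the
 * problem extents and prune corrupt blocks first, then resolve duplicate
 * records and complain about (and optionally fix, via fixup_extent_refs)
 * ref count mismatches, missing backpointers and owner ref failures.
 */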
6877 static int check_extent_refs(struct btrfs_trans_handle *trans,
6878 struct btrfs_root *root,
6879 struct cache_tree *extent_cache)
6881 struct extent_record *rec;
6882 struct cache_extent *cache;
6891 * if we're doing a repair, we have to make sure
6892 * we don't allocate from the problem extents.
6893 * In the worst case, this will be all the
6896 cache = search_cache_extent(extent_cache, 0);
6898 rec = container_of(cache, struct extent_record, cache);
6899 btrfs_pin_extent(root->fs_info,
6900 rec->start, rec->max_size);
6901 cache = next_cache_extent(cache);
6904 /* pin down all the corrupted blocks too */
6905 cache = search_cache_extent(root->fs_info->corrupt_blocks, 0);
6907 btrfs_pin_extent(root->fs_info,
6908 cache->start, cache->size);
6909 cache = next_cache_extent(cache);
6911 prune_corrupt_blocks(trans, root->fs_info);
6912 reset_cached_block_groups(root->fs_info);
6916 * We need to delete any duplicate entries we find first, otherwise we
6917 * could mess up the extent tree when we have backrefs that actually
6918 * belong to a different extent item and not the weird duplicate one.
6920 while (repair && !list_empty(&duplicate_extents)) {
6921 rec = list_entry(duplicate_extents.next, struct extent_record,
6923 list_del_init(&rec->list);
6925 /* Sometimes we can find a backref before we find an actual
6926 * extent, so we need to process it a little bit to see if there
6927 * truly are multiple EXTENT_ITEM_KEY's for the same range, or
6928 * if this is a backref screwup. If we need to delete stuff
6929 * process_duplicates() will return 0, otherwise it will return
6932 if (process_duplicates(root, extent_cache, rec))
6934 ret = delete_duplicate_records(trans, root, rec);
6938 * delete_duplicate_records will return the number of entries
6939 * deleted, so if it's greater than 0 then we know we actually
6940 * did something and we need to remove.
6952 cache = search_cache_extent(extent_cache, 0);
6955 rec = container_of(cache, struct extent_record, cache);
6956 if (rec->num_duplicates) {
6957 fprintf(stderr, "extent item %llu has multiple extent "
6958 "items\n", (unsigned long long)rec->start);
6962 if (rec->refs != rec->extent_item_refs) {
6963 fprintf(stderr, "ref mismatch on [%llu %llu] ",
6964 (unsigned long long)rec->start,
6965 (unsigned long long)rec->nr);
6966 fprintf(stderr, "extent item %llu, found %llu\n",
6967 (unsigned long long)rec->extent_item_refs,
6968 (unsigned long long)rec->refs);
6969 ret = record_orphan_data_extents(root->fs_info, rec);
6976 * we can't use the extent to repair the file
6977 * extent, so let the fallback method handle it.
6979 if (!fixed && repair) {
6980 ret = fixup_extent_refs(trans,
6991 if (all_backpointers_checked(rec, 1)) {
6992 fprintf(stderr, "backpointer mismatch on [%llu %llu]\n",
6993 (unsigned long long)rec->start,
6994 (unsigned long long)rec->nr);
6996 if (!fixed && !recorded && repair) {
6997 ret = fixup_extent_refs(trans, root->fs_info,
7005 if (!rec->owner_ref_checked) {
7006 fprintf(stderr, "owner ref check failed [%llu %llu]\n",
7007 (unsigned long long)rec->start,
7008 (unsigned long long)rec->nr);
7009 if (!fixed && !recorded && repair) {
7010 ret = fixup_extent_refs(trans, root->fs_info,
7019 remove_cache_extent(extent_cache, cache);
7020 free_all_extent_backrefs(rec);
7025 if (ret && ret != -EAGAIN) {
7026 fprintf(stderr, "failed to repair damaged filesystem, aborting\n");
7029 btrfs_fix_block_accounting(trans, root);
7032 fprintf(stderr, "repaired damaged extent references\n");
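/*
 * Return the number of bytes each stripe of a chunk occupies on disk for
 * the given RAID profile.  RAID0 spreads the chunk length evenly over the
 * stripes, RAID10 stores two copies, and RAID5/6 reserve one/two stripes
 * for parity.  For example, a 1GiB RAID10 chunk over 4 stripes needs
 * 1GiB * 2 / 4 = 512MiB per stripe.
 */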
7038 u64 calc_stripe_length(u64 type, u64 length, int num_stripes)
7042 if (type & BTRFS_BLOCK_GROUP_RAID0) {
7043 stripe_size = length;
7044 stripe_size /= num_stripes;
7045 } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
7046 stripe_size = length * 2;
7047 stripe_size /= num_stripes;
7048 } else if (type & BTRFS_BLOCK_GROUP_RAID5) {
7049 stripe_size = length;
7050 stripe_size /= (num_stripes - 1);
7051 } else if (type & BTRFS_BLOCK_GROUP_RAID6) {
7052 stripe_size = length;
7053 stripe_size /= (num_stripes - 2);
7055 stripe_size = length;
7061 * Check the chunk against its block group and dev extent refs:
7062 * Return 0 if all refs seem valid.
7063 * Return 1 if some refs seem valid but others (e.g. a missing block group)
7064 * need a later pass over the extent tree to rebuild them.
7065 * Return -1 if essential refs are missing and cannot be rebuilt.
7067 static int check_chunk_refs(struct chunk_record *chunk_rec,
7068 struct block_group_tree *block_group_cache,
7069 struct device_extent_tree *dev_extent_cache,
7072 struct cache_extent *block_group_item;
7073 struct block_group_record *block_group_rec;
7074 struct cache_extent *dev_extent_item;
7075 struct device_extent_record *dev_extent_rec;
7082 block_group_item = lookup_cache_extent(&block_group_cache->tree,
7085 if (block_group_item) {
7086 block_group_rec = container_of(block_group_item,
7087 struct block_group_record,
7089 if (chunk_rec->length != block_group_rec->offset ||
7090 chunk_rec->offset != block_group_rec->objectid ||
7091 chunk_rec->type_flags != block_group_rec->flags) {
7094 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) mismatch with block group[%llu, %u, %llu]: offset(%llu), objectid(%llu), flags(%llu)\n",
7095 chunk_rec->objectid,
7100 chunk_rec->type_flags,
7101 block_group_rec->objectid,
7102 block_group_rec->type,
7103 block_group_rec->offset,
7104 block_group_rec->offset,
7105 block_group_rec->objectid,
7106 block_group_rec->flags);
7109 list_del_init(&block_group_rec->list);
7110 chunk_rec->bg_rec = block_group_rec;
7115 "Chunk[%llu, %u, %llu]: length(%llu), offset(%llu), type(%llu) is not found in block group\n",
7116 chunk_rec->objectid,
7121 chunk_rec->type_flags);
7125 length = calc_stripe_length(chunk_rec->type_flags, chunk_rec->length,
7126 chunk_rec->num_stripes);
7127 for (i = 0; i < chunk_rec->num_stripes; ++i) {
7128 devid = chunk_rec->stripes[i].devid;
7129 offset = chunk_rec->stripes[i].offset;
7130 dev_extent_item = lookup_cache_extent2(&dev_extent_cache->tree,
7131 devid, offset, length);
7132 if (dev_extent_item) {
7133 dev_extent_rec = container_of(dev_extent_item,
7134 struct device_extent_record,
7136 if (dev_extent_rec->objectid != devid ||
7137 dev_extent_rec->offset != offset ||
7138 dev_extent_rec->chunk_offset != chunk_rec->offset ||
7139 dev_extent_rec->length != length) {
7142 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] dismatch dev extent[%llu, %llu, %llu]\n",
7143 chunk_rec->objectid,
7146 chunk_rec->stripes[i].devid,
7147 chunk_rec->stripes[i].offset,
7148 dev_extent_rec->objectid,
7149 dev_extent_rec->offset,
7150 dev_extent_rec->length);
7153 list_move(&dev_extent_rec->chunk_list,
7154 &chunk_rec->dextents);
7159 "Chunk[%llu, %u, %llu] stripe[%llu, %llu] is not found in dev extent\n",
7160 chunk_rec->objectid,
7163 chunk_rec->stripes[i].devid,
7164 chunk_rec->stripes[i].offset);
7171 /* check btrfs_chunk -> btrfs_dev_extent / btrfs_block_group_item */
7172 int check_chunks(struct cache_tree *chunk_cache,
7173 struct block_group_tree *block_group_cache,
7174 struct device_extent_tree *dev_extent_cache,
7175 struct list_head *good, struct list_head *bad,
7176 struct list_head *rebuild, int silent)
7178 struct cache_extent *chunk_item;
7179 struct chunk_record *chunk_rec;
7180 struct block_group_record *bg_rec;
7181 struct device_extent_record *dext_rec;
7185 chunk_item = first_cache_extent(chunk_cache);
7186 while (chunk_item) {
7187 chunk_rec = container_of(chunk_item, struct chunk_record,
7189 err = check_chunk_refs(chunk_rec, block_group_cache,
7190 dev_extent_cache, silent);
7193 if (err == 0 && good)
7194 list_add_tail(&chunk_rec->list, good);
7195 if (err > 0 && rebuild)
7196 list_add_tail(&chunk_rec->list, rebuild);
7198 list_add_tail(&chunk_rec->list, bad);
7199 chunk_item = next_cache_extent(chunk_item);
7202 list_for_each_entry(bg_rec, &block_group_cache->block_groups, list) {
7205 "Block group[%llu, %llu] (flags = %llu) didn't find the relative chunk.\n",
7213 list_for_each_entry(dext_rec, &dev_extent_cache->no_chunk_orphans,
7217 "Device extent[%llu, %llu, %llu] didn't find the relative chunk.\n",
7228 static int check_device_used(struct device_record *dev_rec,
7229 struct device_extent_tree *dext_cache)
7231 struct cache_extent *cache;
7232 struct device_extent_record *dev_extent_rec;
7235 cache = search_cache_extent2(&dext_cache->tree, dev_rec->devid, 0);
7237 dev_extent_rec = container_of(cache,
7238 struct device_extent_record,
7240 if (dev_extent_rec->objectid != dev_rec->devid)
7243 list_del_init(&dev_extent_rec->device_list);
7244 total_byte += dev_extent_rec->length;
7245 cache = next_cache_extent(cache);
7248 if (total_byte != dev_rec->byte_used) {
7250 "Dev extent's total-byte(%llu) is not equal to byte-used(%llu) in dev[%llu, %u, %llu]\n",
7251 total_byte, dev_rec->byte_used, dev_rec->objectid,
7252 dev_rec->type, dev_rec->offset);
7259 /* check btrfs_dev_item -> btrfs_dev_extent */
7260 static int check_devices(struct rb_root *dev_cache,
7261 struct device_extent_tree *dev_extent_cache)
7263 struct rb_node *dev_node;
7264 struct device_record *dev_rec;
7265 struct device_extent_record *dext_rec;
7269 dev_node = rb_first(dev_cache);
7271 dev_rec = container_of(dev_node, struct device_record, node);
7272 err = check_device_used(dev_rec, dev_extent_cache);
7276 dev_node = rb_next(dev_node);
7278 list_for_each_entry(dext_rec, &dev_extent_cache->no_device_orphans,
7281 "Device extent[%llu, %llu, %llu] didn't find its device.\n",
7282 dext_rec->objectid, dext_rec->offset, dext_rec->length);
7289 static int add_root_item_to_list(struct list_head *head,
7290 u64 objectid, u64 bytenr,
7291 u8 level, u8 drop_level,
7292 int level_size, struct btrfs_key *drop_key)
7295 struct root_item_record *ri_rec;
7296 ri_rec = malloc(sizeof(*ri_rec));
7299 ri_rec->bytenr = bytenr;
7300 ri_rec->objectid = objectid;
7301 ri_rec->level = level;
7302 ri_rec->level_size = level_size;
7303 ri_rec->drop_level = drop_level;
7305 memcpy(&ri_rec->drop_key, drop_key, sizeof(*drop_key));
7306 list_add_tail(&ri_rec->list, head);
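/*
 * Walk the queued root items: read each root's tree block and add it to the
 * pending caches.  Snapshots that are being dropped (or every root when the
 * extent tree is being rebuilt) are walked to completion one at a time; the
 * rest are left for the combined run_next_block() pass afterwards, which
 * maximizes readahead.
 */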
7311 static int deal_root_from_list(struct list_head *list,
7312 struct btrfs_trans_handle *trans,
7313 struct btrfs_root *root,
7314 struct block_info *bits,
7316 struct cache_tree *pending,
7317 struct cache_tree *seen,
7318 struct cache_tree *reada,
7319 struct cache_tree *nodes,
7320 struct cache_tree *extent_cache,
7321 struct cache_tree *chunk_cache,
7322 struct rb_root *dev_cache,
7323 struct block_group_tree *block_group_cache,
7324 struct device_extent_tree *dev_extent_cache)
7329 while (!list_empty(list)) {
7330 struct root_item_record *rec;
7331 struct extent_buffer *buf;
7332 rec = list_entry(list->next,
7333 struct root_item_record, list);
7335 buf = read_tree_block(root->fs_info->tree_root,
7336 rec->bytenr, rec->level_size, 0);
7337 if (!extent_buffer_uptodate(buf)) {
7338 free_extent_buffer(buf);
7342 add_root_to_pending(buf, extent_cache, pending,
7343 seen, nodes, rec->objectid);
7345 * To rebuild the extent tree, we need to deal with snapshots
7346 * one by one; otherwise we deal with the nodes first, which
7347 * maximizes readahead.
7349 if (!init_extent_tree && !rec->drop_level)
7352 ret = run_next_block(trans, root, bits, bits_nr, &last,
7353 pending, seen, reada,
7354 nodes, extent_cache,
7355 chunk_cache, dev_cache,
7357 dev_extent_cache, rec);
7362 free_extent_buffer(buf);
7363 list_del(&rec->list);
7367 ret = run_next_block(trans, root, bits, bits_nr, &last,
7368 pending, seen, reada,
7369 nodes, extent_cache,
7370 chunk_cache, dev_cache,
7372 dev_extent_cache, NULL);
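/*
 * Top-level driver: walk every tree to collect chunks, block groups,
 * devices, device extents, extents and backrefs into caches, then
 * cross-check them with check_extent_refs(), check_chunks() and
 * check_devices().
 */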
7382 static int check_chunks_and_extents(struct btrfs_root *root)
7384 struct rb_root dev_cache;
7385 struct cache_tree chunk_cache;
7386 struct block_group_tree block_group_cache;
7387 struct device_extent_tree dev_extent_cache;
7388 struct cache_tree extent_cache;
7389 struct cache_tree seen;
7390 struct cache_tree pending;
7391 struct cache_tree reada;
7392 struct cache_tree nodes;
7393 struct cache_tree corrupt_blocks;
7394 struct btrfs_path path;
7395 struct btrfs_key key;
7396 struct btrfs_key found_key;
7398 struct block_info *bits;
7400 struct extent_buffer *leaf;
7401 struct btrfs_trans_handle *trans = NULL;
7403 struct btrfs_root_item ri;
7404 struct list_head dropping_trees;
7405 struct list_head normal_trees;
7406 struct btrfs_root *root1;
7411 dev_cache = RB_ROOT;
7412 cache_tree_init(&chunk_cache);
7413 block_group_tree_init(&block_group_cache);
7414 device_extent_tree_init(&dev_extent_cache);
7416 cache_tree_init(&extent_cache);
7417 cache_tree_init(&seen);
7418 cache_tree_init(&pending);
7419 cache_tree_init(&nodes);
7420 cache_tree_init(&reada);
7421 cache_tree_init(&corrupt_blocks);
7422 INIT_LIST_HEAD(&dropping_trees);
7423 INIT_LIST_HEAD(&normal_trees);
7426 trans = btrfs_start_transaction(root, 1);
7427 if (IS_ERR(trans)) {
7428 fprintf(stderr, "Error starting transaction\n");
7429 return PTR_ERR(trans);
7431 root->fs_info->fsck_extent_cache = &extent_cache;
7432 root->fs_info->free_extent_hook = free_extent_hook;
7433 root->fs_info->corrupt_blocks = &corrupt_blocks;
7437 bits = malloc(bits_nr * sizeof(struct block_info));
7444 root1 = root->fs_info->tree_root;
7445 level = btrfs_header_level(root1->node);
7446 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
7447 root1->node->start, level, 0,
7448 btrfs_level_size(root1, level), NULL);
7451 root1 = root->fs_info->chunk_root;
7452 level = btrfs_header_level(root1->node);
7453 ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
7454 root1->node->start, level, 0,
7455 btrfs_level_size(root1, level), NULL);
7458 btrfs_init_path(&path);
7461 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
7462 ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
7467 leaf = path.nodes[0];
7468 slot = path.slots[0];
7469 if (slot >= btrfs_header_nritems(path.nodes[0])) {
7470 ret = btrfs_next_leaf(root, &path);
7473 leaf = path.nodes[0];
7474 slot = path.slots[0];
7476 btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
7477 if (btrfs_key_type(&found_key) == BTRFS_ROOT_ITEM_KEY) {
7478 unsigned long offset;
7480 offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
7481 read_extent_buffer(leaf, &ri, offset, sizeof(ri));
7482 if (btrfs_disk_key_objectid(&ri.drop_progress) == 0) {
7483 level = btrfs_root_level(&ri);
7484 level_size = btrfs_level_size(root, level);
7485 ret = add_root_item_to_list(&normal_trees,
7487 btrfs_root_bytenr(&ri), level,
7488 0, level_size, NULL);
7492 level = btrfs_root_level(&ri);
7493 level_size = btrfs_level_size(root, level);
7494 objectid = found_key.objectid;
7495 btrfs_disk_key_to_cpu(&found_key,
7497 ret = add_root_item_to_list(&dropping_trees,
7499 btrfs_root_bytenr(&ri),
7500 level, ri.drop_level,
7501 level_size, &found_key);
7508 btrfs_release_path(&path);
7509 ret = deal_root_from_list(&normal_trees, trans, root,
7510 bits, bits_nr, &pending, &seen,
7511 &reada, &nodes, &extent_cache,
7512 &chunk_cache, &dev_cache, &block_group_cache,
7516 ret = deal_root_from_list(&dropping_trees, trans, root,
7517 bits, bits_nr, &pending, &seen,
7518 &reada, &nodes, &extent_cache,
7519 &chunk_cache, &dev_cache, &block_group_cache,
7524 ret = check_extent_refs(trans, root, &extent_cache);
7525 if (ret == -EAGAIN) {
7526 ret = btrfs_commit_transaction(trans, root);
7530 trans = btrfs_start_transaction(root, 1);
7531 if (IS_ERR(trans)) {
7532 ret = PTR_ERR(trans);
7536 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
7537 free_extent_cache_tree(&seen);
7538 free_extent_cache_tree(&pending);
7539 free_extent_cache_tree(&reada);
7540 free_extent_cache_tree(&nodes);
7541 free_chunk_cache_tree(&chunk_cache);
7542 free_block_group_tree(&block_group_cache);
7543 free_device_cache_tree(&dev_cache);
7544 free_device_extent_tree(&dev_extent_cache);
7545 free_extent_record_cache(root->fs_info, &extent_cache);
7549 err = check_chunks(&chunk_cache, &block_group_cache,
7550 &dev_extent_cache, NULL, NULL, NULL, 0);
7554 err = check_devices(&dev_cache, &dev_extent_cache);
7560 err = btrfs_commit_transaction(trans, root);
7565 free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
7566 root->fs_info->fsck_extent_cache = NULL;
7567 root->fs_info->free_extent_hook = NULL;
7568 root->fs_info->corrupt_blocks = NULL;
7571 free_chunk_cache_tree(&chunk_cache);
7572 free_device_cache_tree(&dev_cache);
7573 free_block_group_tree(&block_group_cache);
7574 free_device_extent_tree(&dev_extent_cache);
7575 free_extent_cache_tree(&seen);
7576 free_extent_cache_tree(&pending);
7577 free_extent_cache_tree(&reada);
7578 free_extent_cache_tree(&nodes);
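/*
 * Replace @root's node with a freshly initialized, empty block (or reuse
 * the current one when overwriting), filling in the header, fsid and
 * chunk tree uuid, and update the root item when the bytenr is unchanged.
 */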
7582 static int btrfs_fsck_reinit_root(struct btrfs_trans_handle *trans,
7583 struct btrfs_root *root, int overwrite)
7585 struct extent_buffer *c;
7586 struct extent_buffer *old = root->node;
7589 struct btrfs_disk_key disk_key = {0,0,0};
7595 extent_buffer_get(c);
7598 c = btrfs_alloc_free_block(trans, root,
7599 btrfs_level_size(root, 0),
7600 root->root_key.objectid,
7601 &disk_key, level, 0, 0);
7604 extent_buffer_get(c);
7608 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
7609 btrfs_set_header_level(c, level);
7610 btrfs_set_header_bytenr(c, c->start);
7611 btrfs_set_header_generation(c, trans->transid);
7612 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
7613 btrfs_set_header_owner(c, root->root_key.objectid);
7615 write_extent_buffer(c, root->fs_info->fsid,
7616 btrfs_header_fsid(), BTRFS_FSID_SIZE);
7618 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
7619 btrfs_header_chunk_tree_uuid(c),
7622 btrfs_mark_buffer_dirty(c);
7624 * this can happen in the following cases:
7626 * 1. overwriting the previous root.
7628 * 2. reiniting the reloc data root; because we skipped pinning
7629 * down the reloc data tree earlier, we can allocate the
7630 * same block bytenr here.
7632 if (old->start == c->start) {
7633 btrfs_set_root_generation(&root->root_item,
7635 root->root_item.level = btrfs_header_level(root->node);
7636 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7637 &root->root_key, &root->root_item);
7639 free_extent_buffer(c);
7643 free_extent_buffer(old);
7645 add_root_to_dirty_list(root);
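/*
 * Recursively pin every block reachable from @eb so it cannot be handed
 * out by the allocator. When walking the tree root, root items are
 * followed into their subtrees, except for the extent and reloc roots.
 */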
7649 static int pin_down_tree_blocks(struct btrfs_fs_info *fs_info,
7650 struct extent_buffer *eb, int tree_root)
7652 struct extent_buffer *tmp;
7653 struct btrfs_root_item *ri;
7654 struct btrfs_key key;
7657 int level = btrfs_header_level(eb);
7663 * If we have pinned this block before, don't pin it again.
7664 * This not only avoids an infinite loop on a broken filesystem
7665 * but also gives us some speedup.
7667 if (test_range_bit(&fs_info->pinned_extents, eb->start,
7668 eb->start + eb->len - 1, EXTENT_DIRTY, 0))
7671 btrfs_pin_extent(fs_info, eb->start, eb->len);
7673 leafsize = btrfs_super_leafsize(fs_info->super_copy);
7674 nritems = btrfs_header_nritems(eb);
7675 for (i = 0; i < nritems; i++) {
7677 btrfs_item_key_to_cpu(eb, &key, i);
7678 if (key.type != BTRFS_ROOT_ITEM_KEY)
7680 /* Skip the extent root and reloc roots */
7681 if (key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
7682 key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
7683 key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
7685 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
7686 bytenr = btrfs_disk_root_bytenr(eb, ri);
7689 * If at any point we start needing the real root we
7690 * will have to build a stump root for the root we are
7691 * in, but for now this doesn't actually use the root so
7692 * just pass in extent_root.
7694 tmp = read_tree_block(fs_info->extent_root, bytenr,
7697 fprintf(stderr, "Error reading root block\n");
7700 ret = pin_down_tree_blocks(fs_info, tmp, 0);
7701 free_extent_buffer(tmp);
7705 bytenr = btrfs_node_blockptr(eb, i);
7707 /* If we aren't the tree root don't read the block */
7708 if (level == 1 && !tree_root) {
7709 btrfs_pin_extent(fs_info, bytenr, leafsize);
7713 tmp = read_tree_block(fs_info->extent_root, bytenr,
7716 fprintf(stderr, "Error reading tree block\n");
7719 ret = pin_down_tree_blocks(fs_info, tmp, tree_root);
7720 free_extent_buffer(tmp);
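/* Pin all metadata reachable from the chunk root and the tree root. */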
7729 static int pin_metadata_blocks(struct btrfs_fs_info *fs_info)
7733 ret = pin_down_tree_blocks(fs_info, fs_info->chunk_root->node, 0);
7737 return pin_down_tree_blocks(fs_info, fs_info->tree_root->node, 1);
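/*
 * Rebuild the in-memory block group cache from the chunk tree and mark
 * each chunk's range dirty in the free space cache so allocations can
 * proceed before the extent tree is recreated.
 */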
7740 static int reset_block_groups(struct btrfs_fs_info *fs_info)
7742 struct btrfs_block_group_cache *cache;
7743 struct btrfs_path *path;
7744 struct extent_buffer *leaf;
7745 struct btrfs_chunk *chunk;
7746 struct btrfs_key key;
7750 path = btrfs_alloc_path();
7755 key.type = BTRFS_CHUNK_ITEM_KEY;
7758 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
7760 btrfs_free_path(path);
7765 * We do this in case the block groups were screwed up and had alloc
7766 * bits that aren't actually set on the chunks. This happens every time
7767 * with restored images and could also happen on a real filesystem.
7769 fs_info->avail_data_alloc_bits = 0;
7770 fs_info->avail_metadata_alloc_bits = 0;
7771 fs_info->avail_system_alloc_bits = 0;
7773 /* First we need to create the in-memory block groups */
7775 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7776 ret = btrfs_next_leaf(fs_info->chunk_root, path);
7778 btrfs_free_path(path);
7786 leaf = path->nodes[0];
7787 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7788 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7793 chunk = btrfs_item_ptr(leaf, path->slots[0],
7794 struct btrfs_chunk);
7795 btrfs_add_block_group(fs_info, 0,
7796 btrfs_chunk_type(leaf, chunk),
7797 key.objectid, key.offset,
7798 btrfs_chunk_length(leaf, chunk));
7799 set_extent_dirty(&fs_info->free_space_cache, key.offset,
7800 key.offset + btrfs_chunk_length(leaf, chunk),
7806 cache = btrfs_lookup_first_block_group(fs_info, start);
7810 start = cache->key.objectid + cache->key.offset;
7813 btrfs_free_path(path);
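/*
 * Delete any pending balance item and all reloc tree root items from the
 * tree of tree roots, then reinitialize the data reloc tree and recreate
 * its root directory.
 */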
7817 static int reset_balance(struct btrfs_trans_handle *trans,
7818 struct btrfs_fs_info *fs_info)
7820 struct btrfs_root *root = fs_info->tree_root;
7821 struct btrfs_path *path;
7822 struct extent_buffer *leaf;
7823 struct btrfs_key key;
7824 int del_slot, del_nr = 0;
7828 path = btrfs_alloc_path();
7832 key.objectid = BTRFS_BALANCE_OBJECTID;
7833 key.type = BTRFS_BALANCE_ITEM_KEY;
7836 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7841 goto reinit_data_reloc;
7846 ret = btrfs_del_item(trans, root, path);
7849 btrfs_release_path(path);
7851 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7852 key.type = BTRFS_ROOT_ITEM_KEY;
7855 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7859 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7864 ret = btrfs_del_items(trans, root, path,
7871 btrfs_release_path(path);
7874 ret = btrfs_search_slot(trans, root, &key, path,
7881 leaf = path->nodes[0];
7882 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7883 if (key.objectid > BTRFS_TREE_RELOC_OBJECTID)
7885 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7890 del_slot = path->slots[0];
7899 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
7903 btrfs_release_path(path);
7906 key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7907 key.type = BTRFS_ROOT_ITEM_KEY;
7908 key.offset = (u64)-1;
7909 root = btrfs_read_fs_root(fs_info, &key);
7911 fprintf(stderr, "Error reading data reloc tree\n");
7912 ret = PTR_ERR(root);
7915 record_root_in_trans(trans, root);
7916 ret = btrfs_fsck_reinit_root(trans, root, 0);
7919 ret = btrfs_make_root_dir(trans, root, BTRFS_FIRST_FREE_OBJECTID);
7921 btrfs_free_path(path);
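/*
 * Throw away the current extent tree and rebuild enough state for fsck to
 * repopulate it: pin used metadata, reset the block groups, reinit the
 * extent root, re-insert the block group items and reset balance state.
 */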
7925 static int reinit_extent_tree(struct btrfs_trans_handle *trans,
7926 struct btrfs_fs_info *fs_info)
7932 * The only reason we don't do this is because right now we're just
7933 * walking the trees we find and pinning down their bytes; we don't look
7934 * at any of the leaves. In order to do mixed groups we'd have to check
7935 * the leaves of any fs roots and pin down the bytes for any file
7936 * extents we find. Not hard but why do it if we don't have to?
7938 if (btrfs_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)) {
7939 fprintf(stderr, "We don't support re-initing the extent tree "
7940 "for mixed block groups yet, please notify a btrfs "
7941 "developer you want to do this so they can add this "
7942 "functionality.\n");
7947 * first we need to walk all of the trees except the extent tree and pin
7948 * down the bytes that are in use so we don't overwrite any existing
7951 ret = pin_metadata_blocks(fs_info);
7953 fprintf(stderr, "error pinning down used bytes\n");
7958 * Need to drop all the block groups since we're going to recreate all
7961 btrfs_free_block_groups(fs_info);
7962 ret = reset_block_groups(fs_info);
7964 fprintf(stderr, "error resetting the block groups\n");
7968 /* Ok we can allocate now, reinit the extent root */
7969 ret = btrfs_fsck_reinit_root(trans, fs_info->extent_root, 0);
7971 fprintf(stderr, "extent root initialization failed\n");
7973 * When the transaction code is updated we should end the
7974 * transaction, but for now progs only knows about commit so
7975 * just return an error.
7981 * Now we have all the in-memory block groups setup so we can make
7982 * allocations properly, and the metadata we care about is safe since we
7983 * pinned all of it above.
7986 struct btrfs_block_group_cache *cache;
7988 cache = btrfs_lookup_first_block_group(fs_info, start);
7991 start = cache->key.objectid + cache->key.offset;
7992 ret = btrfs_insert_item(trans, fs_info->extent_root,
7993 &cache->key, &cache->item,
7994 sizeof(cache->item));
7996 fprintf(stderr, "Error adding block group\n");
7999 btrfs_extent_post_op(trans, fs_info->extent_root);
8002 ret = reset_balance(trans, fs_info);
8004 fprintf(stderr, "error reseting the pending balance\n");
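/*
 * COW a single metadata block again: look up its owning root, then search
 * down to the block's level with cow enabled inside a short transaction so
 * the block is rewritten.
 */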
8009 static int recow_extent_buffer(struct btrfs_root *root, struct extent_buffer *eb)
8011 struct btrfs_path *path;
8012 struct btrfs_trans_handle *trans;
8013 struct btrfs_key key;
8016 printf("Recowing metadata block %llu\n", eb->start);
8017 key.objectid = btrfs_header_owner(eb);
8018 key.type = BTRFS_ROOT_ITEM_KEY;
8019 key.offset = (u64)-1;
8021 root = btrfs_read_fs_root(root->fs_info, &key);
8023 fprintf(stderr, "Couldn't find owner root %llu\n",
8025 return PTR_ERR(root);
8028 path = btrfs_alloc_path();
8032 trans = btrfs_start_transaction(root, 1);
8033 if (IS_ERR(trans)) {
8034 btrfs_free_path(path);
8035 return PTR_ERR(trans);
8038 path->lowest_level = btrfs_header_level(eb);
8039 if (path->lowest_level)
8040 btrfs_node_key_to_cpu(eb, &key, 0);
8042 btrfs_item_key_to_cpu(eb, &key, 0);
8044 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
8045 btrfs_commit_transaction(trans, root);
8046 btrfs_free_path(path);
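/*
 * Delete one previously recorded bad key from its owning root inside a
 * short transaction of its own.
 */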
8050 static int delete_bad_item(struct btrfs_root *root, struct bad_item *bad)
8052 struct btrfs_path *path;
8053 struct btrfs_trans_handle *trans;
8054 struct btrfs_key key;
8057 printf("Deleting bad item [%llu,%u,%llu]\n", bad->key.objectid,
8058 bad->key.type, bad->key.offset);
8059 key.objectid = bad->root_id;
8060 key.type = BTRFS_ROOT_ITEM_KEY;
8061 key.offset = (u64)-1;
8063 root = btrfs_read_fs_root(root->fs_info, &key);
8065 fprintf(stderr, "Couldn't find owner root %llu\n",
8067 return PTR_ERR(root);
8070 path = btrfs_alloc_path();
8074 trans = btrfs_start_transaction(root, 1);
8075 if (IS_ERR(trans)) {
8076 btrfs_free_path(path);
8077 return PTR_ERR(trans);
8080 ret = btrfs_search_slot(trans, root, &bad->key, path, -1, 1);
8086 ret = btrfs_del_item(trans, root, path);
8088 btrfs_commit_transaction(trans, root);
8089 btrfs_free_path(path);
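/*
 * Clear the log root pointer and level in the superblock and commit,
 * effectively discarding the log tree.
 */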
8093 static int zero_log_tree(struct btrfs_root *root)
8095 struct btrfs_trans_handle *trans;
8098 trans = btrfs_start_transaction(root, 1);
8099 if (IS_ERR(trans)) {
8100 ret = PTR_ERR(trans);
8103 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
8104 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
8105 ret = btrfs_commit_transaction(trans, root);
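/*
 * Read the data extent starting at @start one sector at a time and insert
 * a checksum item for each sector into the csum tree.
 */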
8109 static int populate_csum(struct btrfs_trans_handle *trans,
8110 struct btrfs_root *csum_root, char *buf, u64 start,
8117 while (offset < len) {
8118 sectorsize = csum_root->sectorsize;
8119 ret = read_extent_data(csum_root, buf, start + offset,
8123 ret = btrfs_csum_file_block(trans, csum_root, start + len,
8124 start + offset, buf, sectorsize);
8127 offset += sectorsize;
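/*
 * Walk the extent tree and recompute checksums for every data extent;
 * used when the csum tree is rebuilt from scratch.
 */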
8132 static int fill_csum_tree(struct btrfs_trans_handle *trans,
8133 struct btrfs_root *csum_root)
8135 struct btrfs_root *extent_root = csum_root->fs_info->extent_root;
8136 struct btrfs_path *path;
8137 struct btrfs_extent_item *ei;
8138 struct extent_buffer *leaf;
8140 struct btrfs_key key;
8143 path = btrfs_alloc_path();
8148 key.type = BTRFS_EXTENT_ITEM_KEY;
8151 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
8153 btrfs_free_path(path);
8157 buf = malloc(csum_root->sectorsize);
8159 btrfs_free_path(path);
8164 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8165 ret = btrfs_next_leaf(extent_root, path);
8173 leaf = path->nodes[0];
8175 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
8176 if (key.type != BTRFS_EXTENT_ITEM_KEY) {
8181 ei = btrfs_item_ptr(leaf, path->slots[0],
8182 struct btrfs_extent_item);
8183 if (!(btrfs_extent_flags(leaf, ei) &
8184 BTRFS_EXTENT_FLAG_DATA)) {
8189 ret = populate_csum(trans, csum_root, buf, key.objectid,
8196 btrfs_free_path(path);
8201 struct root_item_info {
8202 /* level of the root */
8204 /* number of nodes at this level, must be 1 for a root */
8208 struct cache_extent cache_extent;
8211 static struct cache_tree *roots_info_cache = NULL;
8213 static void free_roots_info_cache(void)
8215 if (!roots_info_cache)
8218 while (!cache_tree_empty(roots_info_cache)) {
8219 struct cache_extent *entry;
8220 struct root_item_info *rii;
8222 entry = first_cache_extent(roots_info_cache);
8225 remove_cache_extent(roots_info_cache, entry);
8226 rii = container_of(entry, struct root_item_info, cache_extent);
8230 free(roots_info_cache);
8231 roots_info_cache = NULL;
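/*
 * Scan the extent tree for tree block backrefs and record, per root, the
 * highest-level node found together with its bytenr and generation. The
 * resulting cache is used to validate and repair root items.
 */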
8234 static int build_roots_info_cache(struct btrfs_fs_info *info)
8237 struct btrfs_key key;
8238 struct extent_buffer *leaf;
8239 struct btrfs_path *path;
8241 if (!roots_info_cache) {
8242 roots_info_cache = malloc(sizeof(*roots_info_cache));
8243 if (!roots_info_cache)
8245 cache_tree_init(roots_info_cache);
8248 path = btrfs_alloc_path();
8253 key.type = BTRFS_EXTENT_ITEM_KEY;
8256 ret = btrfs_search_slot(NULL, info->extent_root, &key, path, 0, 0);
8259 leaf = path->nodes[0];
8262 struct btrfs_key found_key;
8263 struct btrfs_extent_item *ei;
8264 struct btrfs_extent_inline_ref *iref;
8265 int slot = path->slots[0];
8270 struct cache_extent *entry;
8271 struct root_item_info *rii;
8273 if (slot >= btrfs_header_nritems(leaf)) {
8274 ret = btrfs_next_leaf(info->extent_root, path);
8281 leaf = path->nodes[0];
8282 slot = path->slots[0];
8285 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8287 if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
8288 found_key.type != BTRFS_METADATA_ITEM_KEY)
8291 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
8292 flags = btrfs_extent_flags(leaf, ei);
8294 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
8295 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
8298 if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
8299 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
8300 level = found_key.offset;
8302 struct btrfs_tree_block_info *info;
8304 info = (struct btrfs_tree_block_info *)(ei + 1);
8305 iref = (struct btrfs_extent_inline_ref *)(info + 1);
8306 level = btrfs_tree_block_level(leaf, info);
8310 * For a root extent, it must be of the following type and be the
8311 * first (and only) iref in the item.
8313 type = btrfs_extent_inline_ref_type(leaf, iref);
8314 if (type != BTRFS_TREE_BLOCK_REF_KEY)
8317 root_id = btrfs_extent_inline_ref_offset(leaf, iref);
8318 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
8320 rii = malloc(sizeof(struct root_item_info));
8325 rii->cache_extent.start = root_id;
8326 rii->cache_extent.size = 1;
8327 rii->level = (u8)-1;
8328 entry = &rii->cache_extent;
8329 ret = insert_cache_extent(roots_info_cache, entry);
8332 rii = container_of(entry, struct root_item_info,
8336 ASSERT(rii->cache_extent.start == root_id);
8337 ASSERT(rii->cache_extent.size == 1);
8339 if (level > rii->level || rii->level == (u8)-1) {
8341 rii->bytenr = found_key.objectid;
8342 rii->gen = btrfs_extent_generation(leaf, ei);
8343 rii->node_count = 1;
8344 } else if (level == rii->level) {
8352 btrfs_free_path(path);
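/*
 * Compare a root item against the best root node recorded in
 * roots_info_cache and, unless in read-only mode, rewrite the item's
 * bytenr, level and generation when they disagree.
 */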
8357 static int maybe_repair_root_item(struct btrfs_fs_info *info,
8358 struct btrfs_path *path,
8359 const struct btrfs_key *root_key,
8360 const int read_only_mode)
8362 const u64 root_id = root_key->objectid;
8363 struct cache_extent *entry;
8364 struct root_item_info *rii;
8365 struct btrfs_root_item ri;
8366 unsigned long offset;
8368 entry = lookup_cache_extent(roots_info_cache, root_id, 1);
8371 "Error: could not find extent items for root %llu\n",
8372 root_key->objectid);
8376 rii = container_of(entry, struct root_item_info, cache_extent);
8377 ASSERT(rii->cache_extent.start == root_id);
8378 ASSERT(rii->cache_extent.size == 1);
8380 if (rii->node_count != 1) {
8382 "Error: could not find btree root extent for root %llu\n",
8387 offset = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
8388 read_extent_buffer(path->nodes[0], &ri, offset, sizeof(ri));
8390 if (btrfs_root_bytenr(&ri) != rii->bytenr ||
8391 btrfs_root_level(&ri) != rii->level ||
8392 btrfs_root_generation(&ri) != rii->gen) {
8395 * If we're in repair mode but our caller told us to not update
8396 * the root item, i.e. just check if it needs to be updated, don't
8397 * print this message, since the caller will call us again shortly
8398 * for the same root item without read only mode (the caller will
8399 * open a transaction first).
8401 if (!(read_only_mode && repair))
8403 "%sroot item for root %llu,"
8404 " current bytenr %llu, current gen %llu, current level %u,"
8405 " new bytenr %llu, new gen %llu, new level %u\n",
8406 (read_only_mode ? "" : "fixing "),
8408 btrfs_root_bytenr(&ri), btrfs_root_generation(&ri),
8409 btrfs_root_level(&ri),
8410 rii->bytenr, rii->gen, rii->level);
8412 if (btrfs_root_generation(&ri) > rii->gen) {
8414 "root %llu has a root item with a more recent gen (%llu) compared to the found root node (%llu)\n",
8415 root_id, btrfs_root_generation(&ri), rii->gen);
8419 if (!read_only_mode) {
8420 btrfs_set_root_bytenr(&ri, rii->bytenr);
8421 btrfs_set_root_level(&ri, rii->level);
8422 btrfs_set_root_generation(&ri, rii->gen);
8423 write_extent_buffer(path->nodes[0], &ri,
8424 offset, sizeof(ri));
8434 * A regression introduced in the 3.17 kernel (more specifically in 3.17-rc2)
8435 * caused read-only snapshots to be corrupted if they were created at a moment
8436 * when the source subvolume/snapshot had orphan items. The issue was that the
8437 * on-disk root items became incorrect, referring to the pre orphan cleanup root
8438 * node instead of the post orphan cleanup root node.
8439 * So this function, and its callees, just detects and fixes those cases. Even
8440 * though the regression was for read-only snapshots, this function applies to
8441 * any snapshot/subvolume root.
8442 * This must be run before any other repair code - otherwise other repair
8443 * code may delete or modify backrefs in the extent tree, for example, which
8444 * would result in an inconsistent fs after repairing the root items.
8446 static int repair_root_items(struct btrfs_fs_info *info)
8448 struct btrfs_path *path = NULL;
8449 struct btrfs_key key;
8450 struct extent_buffer *leaf;
8451 struct btrfs_trans_handle *trans = NULL;
8456 ret = build_roots_info_cache(info);
8460 path = btrfs_alloc_path();
8466 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
8467 key.type = BTRFS_ROOT_ITEM_KEY;
8472 * Avoid opening and committing transactions if a leaf doesn't have
8473 * any root items that need to be fixed, so that we avoid rotating
8474 * backup roots unnecessarily.
8477 trans = btrfs_start_transaction(info->tree_root, 1);
8478 if (IS_ERR(trans)) {
8479 ret = PTR_ERR(trans);
8484 ret = btrfs_search_slot(trans, info->tree_root, &key, path,
8488 leaf = path->nodes[0];
8491 struct btrfs_key found_key;
8493 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
8494 int no_more_keys = find_next_key(path, &key);
8496 btrfs_release_path(path);
8498 ret = btrfs_commit_transaction(trans,
8510 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8512 if (found_key.type != BTRFS_ROOT_ITEM_KEY)
8515 ret = maybe_repair_root_item(info, path, &found_key,
8520 if (!trans && repair) {
8523 btrfs_release_path(path);
8533 free_roots_info_cache();
8535 btrfs_free_path(path);
8542 const char * const cmd_check_usage[] = {
8543 "btrfs check [options] <device>",
8544 "Check an unmounted btrfs filesystem.",
8546 "-s|--super <superblock> use this superblock copy",
8547 "-b|--backup use the backup root copy",
8548 "--repair try to repair the filesystem",
8549 "--init-csum-tree create a new CRC tree",
8550 "--init-extent-tree create a new extent tree",
8551 "--check-data-csum verify checkums of data blocks",
8552 "--qgroup-report print a report on qgroup consistency",
8553 "--subvol-extents <subvolid> print subvolume extents and sharing state",
8554 "--tree-root <bytenr> use the given bytenr for the tree root",
8558 int cmd_check(int argc, char **argv)
8560 struct cache_tree root_cache;
8561 struct btrfs_root *root;
8562 struct btrfs_fs_info *info;
8565 u64 tree_root_bytenr = 0;
8566 char uuidbuf[BTRFS_UUID_UNPARSED_SIZE];
8569 int init_csum_tree = 0;
8571 int qgroup_report = 0;
8572 enum btrfs_open_ctree_flags ctree_flags = OPEN_CTREE_EXCLUSIVE;
8576 int option_index = 0;
8577 enum { OPT_REPAIR = 257, OPT_INIT_CSUM, OPT_INIT_EXTENT,
8578 OPT_CHECK_CSUM, OPT_READONLY };
8579 static const struct option long_options[] = {
8580 { "super", 1, NULL, 's' },
8581 { "repair", 0, NULL, OPT_REPAIR },
8582 { "readonly", 0, NULL, OPT_READONLY },
8583 { "init-csum-tree", 0, NULL, OPT_INIT_CSUM },
8584 { "init-extent-tree", 0, NULL, OPT_INIT_EXTENT },
8585 { "check-data-csum", 0, NULL, OPT_CHECK_CSUM },
8586 { "backup", 0, NULL, 'b' },
8587 { "subvol-extents", 1, NULL, 'E' },
8588 { "qgroup-report", 0, NULL, 'Q' },
8589 { "tree-root", 1, NULL, 'r' },
8593 c = getopt_long(argc, argv, "as:br:", long_options,
8598 case 'a': /* ignored */ break;
8600 ctree_flags |= OPEN_CTREE_BACKUP_ROOT;
8603 num = arg_strtou64(optarg);
8604 if (num >= BTRFS_SUPER_MIRROR_MAX) {
8606 "ERROR: super mirror should be less than: %d\n",
8607 BTRFS_SUPER_MIRROR_MAX);
8610 bytenr = btrfs_sb_offset(((int)num));
8611 printf("using SB copy %llu, bytenr %llu\n", num,
8612 (unsigned long long)bytenr);
8618 subvolid = arg_strtou64(optarg);
8621 tree_root_bytenr = arg_strtou64(optarg);
8625 usage(cmd_check_usage);
8627 printf("enabling repair mode\n");
8629 ctree_flags |= OPEN_CTREE_WRITES;
8635 printf("Creating a new CRC tree\n");
8638 ctree_flags |= OPEN_CTREE_WRITES;
8640 case OPT_INIT_EXTENT:
8641 init_extent_tree = 1;
8642 ctree_flags |= (OPEN_CTREE_WRITES |
8643 OPEN_CTREE_NO_BLOCK_GROUPS);
8646 case OPT_CHECK_CSUM:
8647 check_data_csum = 1;
8651 argc = argc - optind;
8653 if (check_argc_exact(argc, 1))
8654 usage(cmd_check_usage);
8656 /* This check is the only reason for --readonly to exist */
8657 if (readonly && repair) {
8658 fprintf(stderr, "Repair options are not compatible with --readonly\n");
8663 cache_tree_init(&root_cache);
8665 if((ret = check_mounted(argv[optind])) < 0) {
8666 fprintf(stderr, "Could not check mount status: %s\n", strerror(-ret));
8669 fprintf(stderr, "%s is currently mounted. Aborting.\n", argv[optind]);
8674 /* only allow partial opening under repair mode */
8676 ctree_flags |= OPEN_CTREE_PARTIAL;
8678 info = open_ctree_fs_info(argv[optind], bytenr, tree_root_bytenr,
8681 fprintf(stderr, "Couldn't open file system\n");
8686 root = info->fs_root;
8689 * repair mode will force us to commit a transaction, which
8690 * will make us fail to load the log tree when mounting.
8692 if (repair && btrfs_super_log_root(info->super_copy)) {
8693 ret = ask_user("repair mode will force us to clear out the log tree, are you sure?");
8698 ret = zero_log_tree(root);
8700 fprintf(stderr, "fail to zero log tree\n");
8705 uuid_unparse(info->super_copy->fsid, uuidbuf);
8706 if (qgroup_report) {
8707 printf("Print quota groups for %s\nUUID: %s\n", argv[optind],
8709 ret = qgroup_verify_all(info);
8711 print_qgroup_report(1);
8715 printf("Print extent state for subvolume %llu on %s\nUUID: %s\n",
8716 subvolid, argv[optind], uuidbuf);
8717 ret = print_extent_state(info, subvolid);
8720 printf("Checking filesystem on %s\nUUID: %s\n", argv[optind], uuidbuf);
8722 if (!extent_buffer_uptodate(info->tree_root->node) ||
8723 !extent_buffer_uptodate(info->dev_root->node) ||
8724 !extent_buffer_uptodate(info->chunk_root->node)) {
8725 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
8730 if (init_extent_tree || init_csum_tree) {
8731 struct btrfs_trans_handle *trans;
8733 trans = btrfs_start_transaction(info->extent_root, 0);
8734 if (IS_ERR(trans)) {
8735 fprintf(stderr, "Error starting transaction\n");
8736 ret = PTR_ERR(trans);
8740 if (init_extent_tree) {
8741 printf("Creating a new extent tree\n");
8742 ret = reinit_extent_tree(trans, info);
8747 if (init_csum_tree) {
8748 fprintf(stderr, "Reinit crc root\n");
8749 ret = btrfs_fsck_reinit_root(trans, info->csum_root, 0);
8751 fprintf(stderr, "crc root initialization failed\n");
8756 ret = fill_csum_tree(trans, info->csum_root);
8758 fprintf(stderr, "crc refilling failed\n");
8763 * Ok now we commit and run the normal fsck, which will add
8764 * extent entries for all of the items it finds.
8766 ret = btrfs_commit_transaction(trans, info->extent_root);
8770 if (!extent_buffer_uptodate(info->extent_root->node)) {
8771 fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
8775 if (!extent_buffer_uptodate(info->csum_root->node)) {
8776 fprintf(stderr, "Checksum root corrupted, rerun with --init-csum-tree option\n");
8781 fprintf(stderr, "checking extents\n");
8782 ret = check_chunks_and_extents(root);
8784 fprintf(stderr, "Errors found in extent allocation tree or chunk allocation\n");
8786 ret = repair_root_items(info);
8790 fprintf(stderr, "Fixed %d roots.\n", ret);
8792 } else if (ret > 0) {
8794 "Found %d roots with an outdated root item.\n",
8797 "Please run a filesystem check with the option --repair to fix them.\n");
8802 fprintf(stderr, "checking free space cache\n");
8803 ret = check_space_cache(root);
8808 * We used to have to have these hole extents in between our real
8809 * extents, so if this flag is not set we need to make sure there
8810 * are no gaps in an inode's file extents; otherwise we can simply
8811 * ignore such gaps when they happen.
8813 no_holes = btrfs_fs_incompat(root->fs_info,
8814 BTRFS_FEATURE_INCOMPAT_NO_HOLES);
8815 fprintf(stderr, "checking fs roots\n");
8816 ret = check_fs_roots(root, &root_cache);
8820 fprintf(stderr, "checking csums\n");
8821 ret = check_csums(root);
8825 fprintf(stderr, "checking root refs\n");
8826 ret = check_root_refs(root, &root_cache);
8830 while (repair && !list_empty(&root->fs_info->recow_ebs)) {
8831 struct extent_buffer *eb;
8833 eb = list_first_entry(&root->fs_info->recow_ebs,
8834 struct extent_buffer, recow);
8835 list_del_init(&eb->recow);
8836 ret = recow_extent_buffer(root, eb);
8841 while (!list_empty(&delete_items)) {
8842 struct bad_item *bad;
8844 bad = list_first_entry(&delete_items, struct bad_item, list);
8845 list_del_init(&bad->list);
8847 ret = delete_bad_item(root, bad);
8851 if (info->quota_enabled) {
8853 fprintf(stderr, "checking quota groups\n");
8854 err = qgroup_verify_all(info);
8859 if (!list_empty(&root->fs_info->recow_ebs)) {
8860 fprintf(stderr, "Transid errors in file system\n");
8864 print_qgroup_report(0);
8865 if (found_old_backref) { /*
8866 * there was a disk format change when mixed
8867 * backref was in the testing tree. The old format
8868 * existed for about one week.
8870 printf("\n * Found old mixed backref format. "
8871 "The old format is not supported! *"
8872 "\n * Please mount the FS in readonly mode, "
8873 "backup data and re-format the FS. *\n\n");
8876 printf("found %llu bytes used err is %d\n",
8877 (unsigned long long)bytes_used, ret);
8878 printf("total csum bytes: %llu\n",(unsigned long long)total_csum_bytes);
8879 printf("total tree bytes: %llu\n",
8880 (unsigned long long)total_btree_bytes);
8881 printf("total fs tree bytes: %llu\n",
8882 (unsigned long long)total_fs_tree_bytes);
8883 printf("total extent tree bytes: %llu\n",
8884 (unsigned long long)total_extent_tree_bytes);
8885 printf("btree space waste bytes: %llu\n",
8886 (unsigned long long)btree_space_waste);
8887 printf("file data blocks allocated: %llu\n referenced %llu\n",
8888 (unsigned long long)data_bytes_allocated,
8889 (unsigned long long)data_bytes_referenced);
8890 printf("%s\n", PACKAGE_STRING);
8892 free_root_recs_tree(&root_cache);