4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
/*
 * Capacity (in entries) of the dentry tree-mark array used while walking
 * directory trees.  NOTE(review): presumably grown on demand by the tree
 * walker elsewhere in this file -- confirm against the full source.
 */
17 uint32_t tree_mark_size = 256;
/*
 * Mark block @blk as in-use in fsck's shadow main-area bitmap, after
 * verifying that the segment's recorded type agrees with the caller's
 * expected @type at the data-vs-node granularity.
 * Returns the result of f2fs_set_bit() (the bit's previous state).
 * NOTE(review): this extraction drops interior lines (original numbering
 * is non-contiguous); the bodies of the two guard branches below, and the
 * declaration of 'se', are not visible here.
 */
19 int f2fs_set_main_bitmap(struct f2fs_sb_info *sbi, u32 blk, int type)
21 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
25 se = get_seg_entry(sbi, GET_SEGNO(sbi, blk));
/* Segment type outside the known range: cannot classify this segment. */
26 if (se->type >= NO_CHECK_TYPE)
/* Data/node class mismatch between the SIT entry and the caller. */
28 else if (IS_DATASEG(se->type) != IS_DATASEG(type))
31 /* just check data and node types */
33 DBG(1, "Wrong segment type [0x%x] %x -> %x",
34 GET_SEGNO(sbi, blk), se->type, type);
/* Record the block in the fsck-private main-area bitmap. */
37 return f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->main_area_bitmap);
/*
 * Return non-zero if @blk is already marked in fsck's shadow main-area
 * bitmap (i.e. the block was visited earlier during this check).
 */
40 static inline int f2fs_test_main_bitmap(struct f2fs_sb_info *sbi, u32 blk)
42 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
44 return f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, blk),
45 fsck->main_area_bitmap);
/*
 * Clear @blk's mark in fsck's shadow main-area bitmap.
 * Returns the result of f2fs_clear_bit() (the bit's previous state).
 */
48 static inline int f2fs_clear_main_bitmap(struct f2fs_sb_info *sbi, u32 blk)
50 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
52 return f2fs_clear_bit(BLKOFF_FROM_MAIN(sbi, blk),
53 fsck->main_area_bitmap);
/*
 * Return non-zero if @blk is marked valid in fsck's copy of the SIT
 * (segment info table) bitmap.
 */
56 static inline int f2fs_test_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
58 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
60 return f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->sit_area_bitmap);
/*
 * Mark @blk valid in fsck's copy of the SIT bitmap.
 * Returns the bit's previous state from f2fs_set_bit().
 */
63 int f2fs_set_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
65 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
67 return f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->sit_area_bitmap);
/*
 * Record inode @nid with on-disk link count @link_cnt in fsck's
 * hard-link tracking list (kept sorted by nid, descending at the point
 * visible here).  actual_links starts at 1 and is incremented each time
 * the inode is reached via another directory entry.
 * NOTE(review): interior lines are missing from this extraction -- the
 * calloc NULL check, loop body, list-splice statements and return value
 * are not visible; comments describe only what is shown.
 */
70 static int add_into_hard_link_list(struct f2fs_sb_info *sbi,
71 u32 nid, u32 link_cnt)
73 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
74 struct hard_link_node *node = NULL, *tmp = NULL, *prev = NULL;
76 node = calloc(sizeof(struct hard_link_node), 1);
80 node->links = link_cnt;
81 node->actual_links = 1;
/* Empty list: new node becomes the head. */
84 if (fsck->hard_link_list_head == NULL) {
85 fsck->hard_link_list_head = node;
89 tmp = fsck->hard_link_list_head;
91 /* Find insertion position */
92 while (tmp && (nid < tmp->nid)) {
/* Duplicate registration for the same nid is a fsck bug. */
93 ASSERT(tmp->nid != nid);
/* Insert before the head when the scan did not advance. */
98 if (tmp == fsck->hard_link_list_head) {
100 fsck->hard_link_list_head = node;
107 DBG(2, "ino[0x%x] has hard links [0x%x]\n", nid, link_cnt);
/*
 * Look up inode @nid in the hard-link list; when found, decrement its
 * remaining expected link count and bump actual_links.  When only one
 * expected link remains the node is unlinked from the list.
 * NOTE(review): return statements, loop body and the free() of the
 * removed node are among the lines missing from this extraction.
 */
111 static int find_and_dec_hard_link_list(struct f2fs_sb_info *sbi, u32 nid)
113 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
114 struct hard_link_node *node = NULL, *prev = NULL;
116 if (fsck->hard_link_list_head == NULL)
119 node = fsck->hard_link_list_head;
/* Walk the sorted list until nid could match. */
121 while (node && (nid < node->nid)) {
126 if (node == NULL || (nid != node->nid))
129 /* Decrease link count */
130 node->links = node->links - 1;
131 node->actual_links++;
133 /* if link count becomes one, remove the node */
134 if (node->links == 1) {
135 if (fsck->hard_link_list_head == node)
136 fsck->hard_link_list_head = node->next;
138 prev->next = node->next;
/*
 * Verify that the SSA (summary) entry covering node block @blk_addr
 * records the owning @nid; with fix_on, repair both a wrong summary
 * footer type and a wrong per-entry nid, then write the summary block
 * back.  Skipped entirely on RO-feature images.
 * NOTE(review): several interior lines (variable declarations such as
 * segno/offset/type/ssa_blk, early returns, closing braces, and the
 * free/cleanup path hinted at by the trailing type check) are missing
 * from this extraction.
 */
144 static int is_valid_ssa_node_blk(struct f2fs_sb_info *sbi, u32 nid,
147 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
148 struct f2fs_summary_block *sum_blk;
149 struct f2fs_summary *sum_entry;
150 struct seg_entry * se;
152 int need_fix = 0, ret = 0;
/* RO images carry no repairable summaries: nothing to validate. */
155 if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
158 segno = GET_SEGNO(sbi, blk_addr);
159 offset = OFFSET_IN_SEG(sbi, blk_addr);
161 sum_blk = get_sum_block(sbi, segno, &type);
/* Footer must say this is a node segment (current or on-disk). */
163 if (type != SEG_TYPE_NODE && type != SEG_TYPE_CUR_NODE) {
164 /* can't fix current summary, then drop the block */
165 if (!c.fix_on || type < 0) {
166 ASSERT_MSG("Summary footer is not for node segment");
/* fix_on path: trust the SIT segment type and rewrite the footer. */
172 se = get_seg_entry(sbi, segno);
173 if(IS_NODESEG(se->type)) {
174 FIX_MSG("Summary footer indicates a node segment: 0x%x", segno);
175 sum_blk->footer.entry_type = SUM_TYPE_NODE;
182 sum_entry = &(sum_blk->entries[offset]);
/* The summary entry must point back to the owning node id. */
184 if (le32_to_cpu(sum_entry->nid) != nid) {
185 if (!c.fix_on || type < 0) {
186 DBG(0, "nid [0x%x]\n", nid);
187 DBG(0, "target blk_addr [0x%x]\n", blk_addr);
188 DBG(0, "summary blk_addr [0x%x]\n",
190 GET_SEGNO(sbi, blk_addr)));
191 DBG(0, "seg no / offset [0x%x / 0x%x]\n",
192 GET_SEGNO(sbi, blk_addr),
193 OFFSET_IN_SEG(sbi, blk_addr));
194 DBG(0, "summary_entry.nid [0x%x]\n",
195 le32_to_cpu(sum_entry->nid));
196 DBG(0, "--> node block's nid [0x%x]\n", nid);
197 ASSERT_MSG("Invalid node seg summary\n");
200 FIX_MSG("Set node summary 0x%x -> [0x%x] [0x%x]",
201 segno, nid, blk_addr);
202 sum_entry->nid = cpu_to_le32(nid);
/* Persist repairs when the device is writable. */
206 if (need_fix && f2fs_dev_is_writable()) {
210 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
211 ret2 = dev_write_block(sum_blk, ssa_blk);
/* NOTE(review): presumably frees sum_blk for non-current segments. */
215 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
216 type == SEG_TYPE_MAX)
/*
 * Cross-check a data-block summary entry: read the parent node named by
 * sum->nid, index it with sum->ofs_in_node (offset by extra_isize for
 * inodes), and confirm the recorded address equals @blk_addr.
 * NOTE(review): early-exit branches, the 'ret'/'ni' declarations, the
 * return statements and the free(node_blk) path are missing from this
 * extraction.
 */
221 static int is_valid_summary(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
224 u16 ofs_in_node = le16_to_cpu(sum->ofs_in_node);
225 u32 nid = le32_to_cpu(sum->nid);
226 struct f2fs_node *node_blk = NULL;
227 __le32 target_blk_addr;
231 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
232 ASSERT(node_blk != NULL);
234 if (!IS_VALID_NID(sbi, nid))
237 get_node_info(sbi, nid, &ni);
239 if (!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
242 /* read node_block */
243 ret = dev_read_block(node_blk, ni.blk_addr);
246 if (le32_to_cpu(node_blk->footer.nid) != nid)
249 /* check its block address */
250 if (node_blk->footer.nid == node_blk->footer.ino) {
/* Inode case: data addresses start after the extra-attr area. */
251 int ofs = get_extra_isize(node_blk);
253 if (ofs + ofs_in_node >= DEF_ADDRS_PER_INODE)
255 target_blk_addr = node_blk->i.i_addr[ofs + ofs_in_node];
/* Direct-node case: plain index into dn.addr[]. */
257 if (ofs_in_node >= DEF_ADDRS_PER_BLOCK)
259 target_blk_addr = node_blk->dn.addr[ofs_in_node];
262 if (blk_addr == le32_to_cpu(target_blk_addr))
/*
 * Verify the SSA entry for data block @blk_addr: the summary must name
 * the parent node (@parent_nid), its @version and the block's index in
 * the parent (@idx_in_node).  With fix_on, repairs the footer type and
 * the entry itself (unless is_valid_summary() proves the existing entry
 * is the correct one), then writes the summary block back.  Mirrors
 * is_valid_ssa_node_blk() for data segments.
 * NOTE(review): local declarations (segno/offset/type/ssa_blk), early
 * returns, closing braces and the cleanup path are missing from this
 * extraction.
 */
269 static int is_valid_ssa_data_blk(struct f2fs_sb_info *sbi, u32 blk_addr,
270 u32 parent_nid, u16 idx_in_node, u8 version)
272 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
273 struct f2fs_summary_block *sum_blk;
274 struct f2fs_summary *sum_entry;
275 struct seg_entry * se;
277 int need_fix = 0, ret = 0;
/* RO images: nothing to validate or repair. */
280 if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
283 segno = GET_SEGNO(sbi, blk_addr);
284 offset = OFFSET_IN_SEG(sbi, blk_addr);
286 sum_blk = get_sum_block(sbi, segno, &type);
288 if (type != SEG_TYPE_DATA && type != SEG_TYPE_CUR_DATA) {
289 /* can't fix current summary, then drop the block */
290 if (!c.fix_on || type < 0) {
291 ASSERT_MSG("Summary footer is not for data segment");
297 se = get_seg_entry(sbi, segno);
298 if (IS_DATASEG(se->type)) {
299 FIX_MSG("Summary footer indicates a data segment: 0x%x", segno);
300 sum_blk->footer.entry_type = SUM_TYPE_DATA;
307 sum_entry = &(sum_blk->entries[offset]);
/* All three recorded fields must match the caller's expectation. */
309 if (le32_to_cpu(sum_entry->nid) != parent_nid ||
310 sum_entry->version != version ||
311 le16_to_cpu(sum_entry->ofs_in_node) != idx_in_node) {
312 if (!c.fix_on || type < 0) {
313 DBG(0, "summary_entry.nid [0x%x]\n",
314 le32_to_cpu(sum_entry->nid));
315 DBG(0, "summary_entry.version [0x%x]\n",
317 DBG(0, "summary_entry.ofs_in_node [0x%x]\n",
318 le16_to_cpu(sum_entry->ofs_in_node));
319 DBG(0, "parent nid [0x%x]\n",
321 DBG(0, "version from nat [0x%x]\n", version);
322 DBG(0, "idx in parent node [0x%x]\n",
325 DBG(0, "Target data block addr [0x%x]\n", blk_addr);
326 ASSERT_MSG("Invalid data seg summary\n");
/* Existing entry already validates against its own parent: the
 * caller's reference is the wrong one, so drop that index instead. */
328 } else if (is_valid_summary(sbi, sum_entry, blk_addr)) {
329 /* delete wrong index */
332 FIX_MSG("Set data summary 0x%x -> [0x%x] [0x%x] [0x%x]",
333 segno, parent_nid, version, idx_in_node);
334 sum_entry->nid = cpu_to_le32(parent_nid);
335 sum_entry->version = version;
336 sum_entry->ofs_in_node = cpu_to_le16(idx_in_node);
340 if (need_fix && f2fs_dev_is_writable()) {
344 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
345 ret2 = dev_write_block(sum_blk, ssa_blk);
349 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
350 type == SEG_TYPE_MAX)
/*
 * Validate that i_mode's file-type bits are a known POSIX type and agree
 * with the directory-entry file type @ftype.  Returns on mismatch paths
 * not visible in this extraction (presumably 0 on match, negative on
 * mismatch -- confirm against the full source).
 */
355 static int __check_inode_mode(u32 nid, enum FILE_TYPE ftype, u16 mode)
357 if (ftype >= F2FS_FT_MAX)
359 /* f2fs_iget will return -EIO if mode is not valid file type */
360 if (!S_ISLNK(mode) && !S_ISREG(mode) && !S_ISDIR(mode) &&
361 !S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode) &&
363 ASSERT_MSG("inode [0x%x] unknown file type i_mode [0x%x]",
/* Each POSIX type must map to its matching dirent ftype. */
368 if (S_ISLNK(mode) && ftype != F2FS_FT_SYMLINK)
370 if (S_ISREG(mode) && ftype != F2FS_FT_REG_FILE)
372 if (S_ISDIR(mode) && ftype != F2FS_FT_DIR)
374 if (S_ISCHR(mode) && ftype != F2FS_FT_CHRDEV)
376 if (S_ISBLK(mode) && ftype != F2FS_FT_BLKDEV)
378 if (S_ISFIFO(mode) && ftype != F2FS_FT_FIFO)
380 if (S_ISSOCK(mode) && ftype != F2FS_FT_SOCK)
384 ASSERT_MSG("inode [0x%x] mismatch i_mode [0x%x vs. 0x%x]",
/*
 * Validate @nid's NAT entry and fill @ni: the nid must be in range, the
 * entry must carry a non-zero ino, and its block address must be a valid
 * data-area address.  Return statements are among the missing lines.
 */
389 static int sanity_check_nat(struct f2fs_sb_info *sbi, u32 nid,
390 struct node_info *ni)
392 if (!IS_VALID_NID(sbi, nid)) {
393 ASSERT_MSG("nid is not valid. [0x%x]", nid);
397 get_node_info(sbi, nid, ni);
399 ASSERT_MSG("nid[0x%x] ino is 0", nid);
/* NULL/NEW sentinel addresses are not acceptable for a live node. */
403 if (!is_valid_data_blkaddr(ni->blk_addr)) {
404 ASSERT_MSG("nid->blk_addr is 0x%x. [0x%x]", ni->blk_addr, nid);
408 if (!f2fs_is_valid_blkaddr(sbi, ni->blk_addr, DATA_GENERIC)) {
409 ASSERT_MSG("blkaddress is not valid. [0x%x]", ni->blk_addr);
/*
 * Public wrapper around sanity_check_nat() for callers that only need
 * the verdict, not the node_info (a local 'ni' is declared on a line
 * missing from this extraction).
 */
416 int fsck_sanity_check_nat(struct f2fs_sb_info *sbi, u32 nid)
420 return sanity_check_nat(sbi, nid, &ni);
/*
 * Full validation of node @nid of kind @ntype/@ftype: checks the NAT
 * entry, reads the node block into @node_blk, and cross-checks footer
 * nid/ino consistency, xattr node offset, duplicate-visit bitmaps, the
 * inode mode, SSA summary, SIT bitmap, and finally accounts the block in
 * fsck's counters with a periodic progress report.
 * NOTE(review): many interior lines (returns, closing braces, the 'ret'
 * declaration) are missing from this extraction; comments below only
 * describe what is visible.
 */
423 static int sanity_check_nid(struct f2fs_sb_info *sbi, u32 nid,
424 struct f2fs_node *node_blk,
425 enum FILE_TYPE ftype, enum NODE_TYPE ntype,
426 struct node_info *ni)
428 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
431 ret = sanity_check_nat(sbi, nid, ni);
435 ret = dev_read_block(node_blk, ni->blk_addr);
/* An inode's footer must have nid == ino ... */
438 if (ntype == TYPE_INODE &&
439 node_blk->footer.nid != node_blk->footer.ino) {
440 ASSERT_MSG("nid[0x%x] footer.nid[0x%x] footer.ino[0x%x]",
441 nid, le32_to_cpu(node_blk->footer.nid),
442 le32_to_cpu(node_blk->footer.ino));
/* ... and the NAT's ino must match the footer's ino. */
445 if (ni->ino != le32_to_cpu(node_blk->footer.ino)) {
446 ASSERT_MSG("nid[0x%x] nat_entry->ino[0x%x] footer.ino[0x%x]",
447 nid, ni->ino, le32_to_cpu(node_blk->footer.ino));
/* Non-inode nodes must NOT have nid == ino. */
450 if (ntype != TYPE_INODE &&
451 node_blk->footer.nid == node_blk->footer.ino) {
452 ASSERT_MSG("nid[0x%x] footer.nid[0x%x] footer.ino[0x%x]",
453 nid, le32_to_cpu(node_blk->footer.nid),
454 le32_to_cpu(node_blk->footer.ino));
458 if (le32_to_cpu(node_blk->footer.nid) != nid) {
459 ASSERT_MSG("nid[0x%x] blk_addr[0x%x] footer.nid[0x%x]",
461 le32_to_cpu(node_blk->footer.nid));
/* xattr nodes encode a fixed offset in the footer flag. */
465 if (ntype == TYPE_XATTR) {
466 u32 flag = le32_to_cpu(node_blk->footer.flag);
468 if ((flag >> OFFSET_BIT_SHIFT) != XATTR_NODE_OFFSET) {
469 ASSERT_MSG("xnid[0x%x] has wrong ofs:[0x%x]",
475 if ((ntype == TYPE_INODE && ftype == F2FS_FT_DIR) ||
476 (ntype == TYPE_XATTR && ftype == F2FS_FT_XATTR)) {
477 /* not included '.' & '..' */
478 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) != 0) {
479 ASSERT_MSG("Duplicated node blk. nid[0x%x][0x%x]\n",
485 /* this if only from fix_hard_links */
486 if (ftype == F2FS_FT_MAX)
489 if (ntype == TYPE_INODE &&
490 __check_inode_mode(nid, ftype, le16_to_cpu(node_blk->i.i_mode)))
493 /* workaround to fix later */
494 if (ftype != F2FS_FT_ORPHAN ||
495 f2fs_test_bit(nid, fsck->nat_area_bitmap) != 0) {
496 f2fs_clear_bit(nid, fsck->nat_area_bitmap);
497 /* avoid reusing nid when reconnecting files */
498 f2fs_set_bit(nid, NM_I(sbi)->nid_bitmap);
500 ASSERT_MSG("orphan or xattr nid is duplicated [0x%x]\n",
503 if (is_valid_ssa_node_blk(sbi, nid, ni->blk_addr)) {
504 ASSERT_MSG("summary node block is not valid. [0x%x]", nid);
508 if (f2fs_test_sit_bitmap(sbi, ni->blk_addr) == 0)
509 ASSERT_MSG("SIT bitmap is 0x0. blk_addr[0x%x]",
/* First visit of this block: count it. */
512 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0) {
514 fsck->chk.valid_blk_cnt++;
515 fsck->chk.valid_node_cnt++;
517 /* Progress report */
518 if (!c.show_file_map && sbi->total_valid_node_count > 1000) {
519 unsigned int p10 = sbi->total_valid_node_count / 10;
521 if (sbi->fsck->chk.checked_node_cnt++ % p10)
524 printf("[FSCK] Check node %"PRIu64" / %u (%.2f%%)\n",
525 sbi->fsck->chk.checked_node_cnt,
526 sbi->total_valid_node_count,
527 10 * (float)sbi->fsck->chk.checked_node_cnt /
/*
 * Public wrapper: allocate a scratch node block, run sanity_check_nid(),
 * and return its verdict.  The free() of node_blk and the 'ni'/'ret'
 * declarations are on lines missing from this extraction.
 */
534 int fsck_sanity_check_nid(struct f2fs_sb_info *sbi, u32 nid,
535 enum FILE_TYPE ftype, enum NODE_TYPE ntype)
537 struct f2fs_node *node_blk = NULL;
541 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
542 ASSERT(node_blk != NULL);
544 ret = sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni);
/*
 * Check the external xattr node @x_nid of inode @ino: validate it via
 * sanity_check_nid(), then account it (blk_cnt, main bitmap as
 * CURSEG_COLD_NODE).  Early return for x_nid == 0 and the free()
 * cleanup are on lines missing from this extraction.
 */
550 static int fsck_chk_xattr_blk(struct f2fs_sb_info *sbi, u32 ino,
551 u32 x_nid, u32 *blk_cnt)
553 struct f2fs_node *node_blk = NULL;
560 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
561 ASSERT(node_blk != NULL);
564 if (sanity_check_nid(sbi, x_nid, node_blk,
565 F2FS_FT_XATTR, TYPE_XATTR, &ni)) {
570 *blk_cnt = *blk_cnt + 1;
571 f2fs_set_main_bitmap(sbi, ni.blk_addr, CURSEG_COLD_NODE);
572 DBG(2, "ino[0x%x] x_nid[0x%x]\n", ino, x_nid);
/*
 * Dispatch point of the node-tree walk: validate node @nid, then descend
 * into it according to @ntype -- inode blocks go to fsck_chk_inode_blk()
 * (plus quota accounting), direct/indirect/double-indirect nodes are
 * marked in the main bitmap and recursed into via their dedicated
 * checkers.
 * NOTE(review): the switch header, break/return statements, error path
 * and free(node_blk) are on lines missing from this extraction.
 */
578 int fsck_chk_node_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
579 u32 nid, enum FILE_TYPE ftype, enum NODE_TYPE ntype,
580 u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
581 struct child_info *child)
584 struct f2fs_node *node_blk = NULL;
586 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
587 ASSERT(node_blk != NULL);
589 if (sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni))
592 if (ntype == TYPE_INODE) {
593 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
595 fsck_chk_inode_blk(sbi, nid, ftype, node_blk, blk_cnt, cbc,
597 quota_add_inode_usage(fsck->qctx, nid, &node_blk->i);
600 case TYPE_DIRECT_NODE:
601 f2fs_set_main_bitmap(sbi, ni.blk_addr,
603 fsck_chk_dnode_blk(sbi, inode, nid, ftype, node_blk,
604 blk_cnt, cbc, child, &ni);
606 case TYPE_INDIRECT_NODE:
607 f2fs_set_main_bitmap(sbi, ni.blk_addr,
609 fsck_chk_idnode_blk(sbi, inode, ftype, node_blk,
610 blk_cnt, cbc, child);
612 case TYPE_DOUBLE_INDIRECT_NODE:
613 f2fs_set_main_bitmap(sbi, ni.blk_addr,
615 fsck_chk_didnode_blk(sbi, inode, ftype, node_blk,
616 blk_cnt, cbc, child);
/*
 * Return true when @blk_addr is marked valid in its segment's on-disk
 * current valid-block bitmap (SIT), i.e. the block holds live data.
 */
629 static bool is_sit_bitmap_set(struct f2fs_sb_info *sbi, u32 blk_addr)
631 struct seg_entry *se;
634 se = get_seg_entry(sbi, GET_SEGNO(sbi, blk_addr));
635 offset = OFFSET_IN_SEG(sbi, blk_addr);
637 return f2fs_test_bit(offset,
638 (const char *)se->cur_valid_map) != 0;
/*
 * Recover a corrupted root inode: scan every node segment for blocks
 * whose footer says nid == ino == root_ino and that look like a root
 * inode (no i_generation / i_namelen), preferring the candidate with
 * the newest i_ctime/i_ctime_nsec.  The scan runs twice -- first over
 * SIT-valid blocks, then (valid_bitmap = false) over invalidated ones.
 * On success with fix_on, relinks the found block into the NAT (journal
 * and table) and fixes fsck's nat/sit accounting.
 * NOTE(review): loop braces, 'continue'/'goto' targets, several local
 * declarations (segno, i, ret, last_ctime) and the free/return path are
 * on lines missing from this extraction; comments describe only what is
 * visible.
 */
641 int fsck_chk_root_inode(struct f2fs_sb_info *sbi)
643 struct f2fs_node *node_blk;
644 int segment_count = SM_I(sbi)->main_segments;
646 bool valid_bitmap = true;
647 block_t last_blkaddr = NULL_ADDR;
648 nid_t root_ino = sbi->root_ino_num;
650 u32 last_ctime_nsec = 0;
653 node_blk = calloc(BLOCK_SZ, 1);
656 MSG(0, "Info: root inode is corrupted, search and relink it\n");
659 for (segno = 0; segno < segment_count; segno++) {
660 struct seg_entry *se = get_seg_entry(sbi, segno);
661 block_t blkaddr = START_BLOCK(sbi, segno);
/* Root is a node block: skip data segments outright. */
665 if (IS_DATASEG(se->type))
668 dev_readahead(blkaddr << F2FS_BLKSIZE_BITS,
669 sbi->blocks_per_seg << F2FS_BLKSIZE_BITS);
671 for (i = 0; i < sbi->blocks_per_seg; i++, blkaddr++) {
/* Pass filter: only blocks whose SIT validity matches this pass. */
672 if (valid_bitmap ^ is_sit_bitmap_set(sbi, blkaddr))
675 ret = dev_read_block(node_blk, blkaddr);
678 if (le32_to_cpu(node_blk->footer.ino) !=
680 le32_to_cpu(node_blk->footer.nid) !=
684 if (!IS_INODE(node_blk))
/* Root inode carries neither generation nor a name. */
687 if (le32_to_cpu(node_blk->i.i_generation) ||
688 le32_to_cpu(node_blk->i.i_namelen))
693 if (i == sbi->blocks_per_seg)
697 last_blkaddr = blkaddr;
698 MSG(0, "Info: possible root inode blkaddr: 0x%x\n",
703 if (last_blkaddr == NULL_ADDR)
/* Keep the candidate with the newest ctime (tie broken by nsec). */
705 if (le64_to_cpu(node_blk->i.i_ctime) < last_ctime)
707 if (le64_to_cpu(node_blk->i.i_ctime) == last_ctime &&
708 le32_to_cpu(node_blk->i.i_ctime_nsec) <=
712 last_blkaddr = blkaddr;
713 last_ctime = le64_to_cpu(node_blk->i.i_ctime);
714 last_ctime_nsec = le32_to_cpu(node_blk->i.i_ctime_nsec);
716 MSG(0, "Info: possible root inode blkaddr: %u\n",
/* Second pass over SIT-invalid blocks. */
721 valid_bitmap = false;
726 MSG(0, "Info: there is no valid root inode\n");
727 } else if (c.fix_on) {
728 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
730 FIX_MSG("Relink root inode, blkaddr: 0x%x", last_blkaddr);
731 update_nat_journal_blkaddr(sbi, root_ino, last_blkaddr);
732 update_nat_blkaddr(sbi, root_ino, root_ino, last_blkaddr);
734 if (f2fs_test_bit(root_ino, fsck->nat_area_bitmap))
735 f2fs_clear_bit(root_ino, fsck->nat_area_bitmap);
736 fsck->chk.valid_nat_entry_cnt++;
738 if (!f2fs_test_sit_bitmap(sbi, last_blkaddr))
739 f2fs_set_sit_bitmap(sbi, last_blkaddr);
/*
 * Decode the on-disk little-endian read extent @i_ext into the in-memory
 * extent_info @ext (file offset, start block, length).
 */
746 static inline void get_extent_info(struct extent_info *ext,
747 struct f2fs_extent *i_ext)
749 ext->fofs = le32_to_cpu(i_ext->fofs);
750 ext->blk = le32_to_cpu(i_ext->blk_addr);
751 ext->len = le32_to_cpu(i_ext->len);
/*
 * Incrementally validate the inode's cached read extent against the data
 * blocks being walked: called once per block (last == 0) with the block's
 * address and once at the end of the walk (last == 1) to catch holes at
 * the extent's tail.  Any contradiction sets FSCK_UNMATCHED_EXTENT in
 * child->state.
 * NOTE(review): several return statements, the 'is_hole' computation and
 * some brace lines are missing from this extraction.
 */
754 static void check_extent_info(struct child_info *child,
755 block_t blkaddr, int last)
757 struct extent_info *ei = &child->ei;
758 u32 pgofs = child->pgofs;
/* Already known bad: nothing more to learn. */
764 if (child->state & FSCK_UNMATCHED_EXTENT)
/* Inline inodes must not carry a non-empty extent. */
767 if ((child->state & FSCK_INLINE_INODE) && ei->len)
771 /* hole exist in the back of extent */
772 if (child->last_blk != ei->blk + ei->len - 1)
773 child->state |= FSCK_UNMATCHED_EXTENT;
777 if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
/* Page offset falls inside the cached extent's range. */
780 if (pgofs >= ei->fofs && pgofs < ei->fofs + ei->len) {
781 /* unmatched blkaddr */
782 if (is_hole || (blkaddr != pgofs - ei->fofs + ei->blk))
785 if (!child->last_blk) {
786 /* hole exists in the front of extent */
787 if (pgofs != ei->fofs)
789 } else if (child->last_blk + 1 != blkaddr) {
790 /* hole exists in the middle of extent */
793 child->last_blk = blkaddr;
/* Outside the extent's file range: the block must not reuse the
 * extent's physical range either. */
800 if (blkaddr < ei->blk || blkaddr >= ei->blk + ei->len)
802 /* unmatched file offset */
804 child->state |= FSCK_UNMATCHED_EXTENT;
/*
 * Issue a readahead for node @nid's block if the nid and its NAT block
 * address are valid; best-effort, no return value.  (A local node_info
 * 'ni' is declared on a line missing from this extraction.)
 */
807 void fsck_reada_node_block(struct f2fs_sb_info *sbi, u32 nid)
811 if (nid != 0 && IS_VALID_NID(sbi, nid)) {
812 get_node_info(sbi, nid, &ni);
813 if (f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
814 dev_reada_block(ni.blk_addr);
/*
 * Prefetch every child node referenced by an indirect node block
 * (@node_blk->in.nid[]), skipping invalid entries via
 * fsck_reada_node_block().
 */
818 void fsck_reada_all_direct_node_blocks(struct f2fs_sb_info *sbi,
819 struct f2fs_node *node_blk)
823 for (i = 0; i < NIDS_PER_BLOCK; i++) {
824 u32 nid = le32_to_cpu(node_blk->in.nid[i]);
826 fsck_reada_node_block(sbi, nid);
830 /* start with valid nid and blkaddr */
/*
 * Check one inode: fixes compression/casefold/extra-attr flag
 * inconsistencies, tracks hard links, validates the xattr node, inline
 * data and inline dentries, walks every data address and child node
 * (direct/indirect/double-indirect), then reconciles i_blocks,
 * i_compr_blocks, i_namelen, directory i_links/dots, i_gc_failures,
 * symlink i_size, orphan i_links and the inode checksum, writing the
 * repaired block back when fix_on and the device is writable.
 * NOTE(review): this extraction is missing many interior lines (braces,
 * else-branches, 'need_fix' updates, goto labels such as
 * skip_blkcnt_fix/check, and several declarations); inline comments
 * below annotate only the visible statements.
 */
831 void fsck_chk_inode_blk(struct f2fs_sb_info *sbi, u32 nid,
832 enum FILE_TYPE ftype, struct f2fs_node *node_blk,
833 u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
834 struct node_info *ni, struct child_info *child_d)
836 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
837 struct child_info child;
838 enum NODE_TYPE ntype;
839 u32 i_links = le32_to_cpu(node_blk->i.i_links);
840 u64 i_size = le64_to_cpu(node_blk->i.i_size);
841 u64 i_blocks = le64_to_cpu(node_blk->i.i_blocks);
842 bool compr_supported = c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION);
843 u32 i_flags = le32_to_cpu(node_blk->i.i_flags);
844 bool compressed = i_flags & F2FS_COMPR_FL;
845 bool compr_rel = node_blk->i.i_inline & F2FS_COMPRESS_RELEASED;
846 u64 i_compr_blocks = le64_to_cpu(node_blk->i.i_compr_blocks);
847 nid_t i_xattr_nid = le32_to_cpu(node_blk->i.i_xattr_nid);
851 unsigned int addrs, idx = 0;
852 unsigned short i_gc_failures;
855 u32 cluster_size = 1 << node_blk->i.i_log_cluster_size;
/* Compression flag without feature support, or combined with inline
 * data, is inconsistent and must be cleared in memory first. */
860 if (!compr_supported || (node_blk->i.i_inline & F2FS_INLINE_DATA)) {
862 * The 'compression' flag in i_flags affects the traverse of
863 * the node tree. Thus, it must be fixed unconditionally
864 * in the memory (node_blk).
866 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_COMPR_FL);
870 FIX_MSG("[0x%x] i_flags=0x%x -> 0x%x",
871 nid, i_flags, node_blk->i.i_flags);
873 i_flags &= ~F2FS_COMPR_FL;
876 memset(&child, 0, sizeof(child));
879 child.pp_ino = le32_to_cpu(node_blk->i.i_pino);
880 child.dir_level = node_blk->i.i_dir_level;
882 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0)
883 fsck->chk.valid_inode_cnt++;
/* Directories are hot node data; remember the name for reporting. */
885 if (ftype == F2FS_FT_DIR) {
886 f2fs_set_main_bitmap(sbi, ni->blk_addr, CURSEG_HOT_NODE);
887 namelen = le32_to_cpu(node_blk->i.i_namelen);
888 if (namelen > F2FS_NAME_LEN)
889 namelen = F2FS_NAME_LEN;
890 memcpy(child.p_name, node_blk->i.i_name, namelen);
/* First visit of a non-directory inode: register hard links. */
892 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0) {
893 f2fs_set_main_bitmap(sbi, ni->blk_addr,
895 if (i_links > 1 && ftype != F2FS_FT_ORPHAN &&
896 !is_qf_ino(F2FS_RAW_SUPER(sbi), nid)) {
897 /* First time. Create new hard link node */
898 add_into_hard_link_list(sbi, nid, i_links);
899 fsck->chk.multi_hard_link_files++;
/* Revisit through another link: decrement the expected count. */
902 DBG(3, "[0x%x] has hard links [0x%x]\n", nid, i_links);
903 if (find_and_dec_hard_link_list(sbi, nid)) {
904 ASSERT_MSG("[0x%x] needs more i_links=0x%x",
907 node_blk->i.i_links =
908 cpu_to_le32(i_links + 1);
910 FIX_MSG("File: 0x%x "
911 "i_links= 0x%x -> 0x%x",
912 nid, i_links, i_links + 1);
914 goto skip_blkcnt_fix;
916 /* No need to go deep into the node */
921 /* readahead xattr node block */
922 fsck_reada_node_block(sbi, i_xattr_nid);
/* Broken xattr node: detach it from the inode when fixing. */
924 if (fsck_chk_xattr_blk(sbi, nid, i_xattr_nid, blk_cnt)) {
926 node_blk->i.i_xattr_nid = 0;
928 FIX_MSG("Remove xattr block: 0x%x, x_nid = 0x%x",
/* Special files carry no data blocks to walk. */
933 if (ftype == F2FS_FT_CHRDEV || ftype == F2FS_FT_BLKDEV ||
934 ftype == F2FS_FT_FIFO || ftype == F2FS_FT_SOCK)
937 /* init extent info */
938 get_extent_info(&child.ei, &node_blk->i.i_ext);
/* Validate extra_isize against the feature flag, repairing either the
 * size or the inline flag as appropriate. */
941 if (f2fs_has_extra_isize(&node_blk->i)) {
942 if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
944 le16_to_cpu(node_blk->i.i_extra_isize);
945 if (isize > 4 * DEF_ADDRS_PER_INODE) {
946 ASSERT_MSG("[0x%x] wrong i_extra_isize=0x%x",
949 FIX_MSG("ino[0x%x] recover i_extra_isize "
953 node_blk->i.i_extra_isize =
954 cpu_to_le16(calc_extra_isize());
959 ASSERT_MSG("[0x%x] wrong extra_attr flag", nid);
961 FIX_MSG("ino[0x%x] remove F2FS_EXTRA_ATTR "
962 "flag in i_inline:%u",
963 nid, node_blk->i.i_inline);
964 /* we don't support tuning F2FS_FEATURE_EXTRA_ATTR now */
965 node_blk->i.i_inline &= ~F2FS_EXTRA_ATTR;
/* Flexible inline-xattr size must lie within its legal range. */
971 cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) &&
972 (node_blk->i.i_inline & F2FS_INLINE_XATTR)) {
973 unsigned int inline_size =
974 le16_to_cpu(node_blk->i.i_inline_xattr_size);
977 inline_size > MAX_INLINE_XATTR_SIZE) {
978 ASSERT_MSG("[0x%x] wrong inline_xattr_size:%u",
981 FIX_MSG("ino[0x%x] recover inline xattr size "
984 DEFAULT_INLINE_XATTR_ADDRS);
985 node_blk->i.i_inline_xattr_size =
986 cpu_to_le16(DEFAULT_INLINE_XATTR_ADDRS);
992 ofs = get_extra_isize(node_blk);
/* Casefold is only legal on directories with the feature enabled. */
994 if ((node_blk->i.i_flags & cpu_to_le32(F2FS_CASEFOLD_FL)) &&
995 (ftype != F2FS_FT_DIR ||
996 !(c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)))) {
997 ASSERT_MSG("[0x%x] unexpected casefold flag", nid);
999 FIX_MSG("ino[0x%x] clear casefold flag", nid);
1000 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_CASEFOLD_FL);
/* Inline data: reserved 0'th address must be empty and i_size must fit
 * within the inline area. */
1005 if ((node_blk->i.i_inline & F2FS_INLINE_DATA)) {
1006 unsigned int inline_size = MAX_INLINE_DATA(node_blk);
1007 if (cur_qtype != -1)
1008 qf_szchk_type[cur_qtype] = QF_SZCHK_INLINE;
1009 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
1012 ASSERT_MSG("[0x%x] wrong inline reserve blkaddr:%u",
1015 FIX_MSG("inline_data has wrong 0'th block = %x",
1017 node_blk->i.i_addr[ofs] = 0;
1018 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1022 if (i_size > inline_size) {
1023 ASSERT_MSG("[0x%x] wrong inline size:%lu",
1024 nid, (unsigned long)i_size);
1026 node_blk->i.i_size = cpu_to_le64(inline_size);
1027 FIX_MSG("inline_data has wrong i_size %lu",
1028 (unsigned long)i_size);
/* DATA_EXIST unset but inline area non-zero -> junk or lost flag. */
1032 if (!(node_blk->i.i_inline & F2FS_DATA_EXIST)) {
1033 char buf[MAX_INLINE_DATA(node_blk)];
1034 memset(buf, 0, MAX_INLINE_DATA(node_blk));
1036 if (memcmp(buf, inline_data_addr(node_blk),
1037 MAX_INLINE_DATA(node_blk))) {
1038 ASSERT_MSG("[0x%x] junk inline data", nid);
1040 FIX_MSG("inline_data has DATA_EXIST");
1041 node_blk->i.i_inline |= F2FS_DATA_EXIST;
1046 DBG(3, "ino[0x%x] has inline data!\n", nid);
1047 child.state |= FSCK_INLINE_INODE;
/* Inline dentry directory: same reserved-address check, then walk the
 * packed dentries. */
1051 if ((node_blk->i.i_inline & F2FS_INLINE_DENTRY)) {
1052 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
1054 DBG(3, "ino[0x%x] has inline dentry!\n", nid);
1056 ASSERT_MSG("[0x%x] wrong inline reserve blkaddr:%u",
1059 FIX_MSG("inline_dentry has wrong 0'th block = %x",
1061 node_blk->i.i_addr[ofs] = 0;
1062 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1067 ret = fsck_chk_inline_dentries(sbi, node_blk, &child);
1072 child.state |= FSCK_INLINE_INODE;
1076 /* check data blocks in inode */
1077 addrs = ADDRS_PER_INODE(&node_blk->i);
/* Quota files get a theoretical maximum size for the size check. */
1078 if (cur_qtype != -1) {
1079 u64 addrs_per_blk = (u64)ADDRS_PER_BLOCK(&node_blk->i);
1080 qf_szchk_type[cur_qtype] = QF_SZCHK_REGFILE;
1081 qf_maxsize[cur_qtype] = (u64)(addrs + 2 * addrs_per_blk +
1082 2 * addrs_per_blk * NIDS_PER_BLOCK +
1083 addrs_per_blk * NIDS_PER_BLOCK *
1084 NIDS_PER_BLOCK) * F2FS_BLKSIZE;
/* Walk the inode's direct data addresses. */
1086 for (idx = 0; idx < addrs; idx++, child.pgofs++) {
1087 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs + idx]);
1089 /* check extent info */
1090 check_extent_info(&child, blkaddr, 0);
1092 if (blkaddr == NULL_ADDR)
/* COMPRESS_ADDR marks a cluster header: only legal on compressed
 * inodes at cluster-aligned offsets. */
1094 if (blkaddr == COMPRESS_ADDR) {
1095 if (!compressed || (child.pgofs &
1096 (cluster_size - 1)) != 0) {
1098 node_blk->i.i_addr[ofs + idx] =
1101 FIX_MSG("[0x%x] i_addr[%d] = 0", nid,
1107 fsck->chk.valid_blk_cnt++;
1108 *blk_cnt = *blk_cnt + 1;
1109 cbc->cheader_pgofs = child.pgofs;
/* NEW_ADDR inside a live compressed cluster is expected; skip. */
1114 if (!compr_rel && blkaddr == NEW_ADDR &&
1115 child.pgofs - cbc->cheader_pgofs < cluster_size)
1117 ret = fsck_chk_data_blk(sbi,
1118 IS_CASEFOLDED(&node_blk->i),
1120 &child, (i_blocks == *blk_cnt),
1121 ftype, nid, idx, ni->version,
1122 file_is_encrypt(&node_blk->i));
1124 *blk_cnt = *blk_cnt + 1;
1125 if (cur_qtype != -1 && blkaddr != NEW_ADDR)
1126 qf_last_blkofs[cur_qtype] = child.pgofs;
1127 } else if (c.fix_on) {
1128 node_blk->i.i_addr[ofs + idx] = 0;
1130 FIX_MSG("[0x%x] i_addr[%d] = 0", nid, ofs + idx);
1134 /* readahead node blocks */
1135 for (idx = 0; idx < 5; idx++) {
1136 u32 nid = le32_to_cpu(node_blk->i.i_nid[idx]);
1137 fsck_reada_node_block(sbi, nid);
1140 /* check node blocks in inode */
1141 for (idx = 0; idx < 5; idx++) {
1142 nid_t i_nid = le32_to_cpu(node_blk->i.i_nid[idx]);
/* i_nid layout: [0..1] direct, [2..3] indirect, [4] double indirect. */
1144 if (idx == 0 || idx == 1)
1145 ntype = TYPE_DIRECT_NODE;
1146 else if (idx == 2 || idx == 3)
1147 ntype = TYPE_INDIRECT_NODE;
1149 ntype = TYPE_DOUBLE_INDIRECT_NODE;
1156 ret = fsck_chk_node_blk(sbi, &node_blk->i, i_nid,
1157 ftype, ntype, blk_cnt, cbc, &child);
1159 *blk_cnt = *blk_cnt + 1;
1160 } else if (ret == -EINVAL) {
1162 node_blk->i.i_nid[idx] = 0;
1164 FIX_MSG("[0x%x] i_nid[%d] = 0", nid, idx);
/* Advance the logical file offset past the subtree's span. */
1167 if (ntype == TYPE_DIRECT_NODE)
1168 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1169 else if (ntype == TYPE_INDIRECT_NODE)
1170 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1173 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1174 NIDS_PER_BLOCK * NIDS_PER_BLOCK;
1180 /* check uncovered range in the back of extent */
1181 check_extent_info(&child, 0, 1);
1183 if (child.state & FSCK_UNMATCHED_EXTENT) {
1184 ASSERT_MSG("ino: 0x%x has wrong ext: [pgofs:%u, blk:%u, len:%u]",
1185 nid, child.ei.fofs, child.ei.blk, child.ei.len);
/* Reconcile i_blocks with the number of blocks actually found. */
1190 if (i_blocks != *blk_cnt) {
1191 ASSERT_MSG("ino: 0x%x has i_blocks: %08"PRIx64", "
1192 "but has %u blocks",
1193 nid, i_blocks, *blk_cnt);
1195 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1197 FIX_MSG("[0x%x] i_blocks=0x%08"PRIx64" -> 0x%x",
1198 nid, i_blocks, *blk_cnt);
1202 if (compressed && i_compr_blocks != cbc->cnt) {
1204 node_blk->i.i_compr_blocks = cpu_to_le64(cbc->cnt);
1206 FIX_MSG("[0x%x] i_compr_blocks=0x%08"PRIx64" -> 0x%x",
1207 nid, i_compr_blocks, cbc->cnt);
1212 en = malloc(F2FS_PRINT_NAMELEN);
/* Repair an out-of-range i_namelen from the parent's dirent if known. */
1215 namelen = le32_to_cpu(node_blk->i.i_namelen);
1216 if (namelen > F2FS_NAME_LEN) {
1217 if (child_d && child_d->i_namelen <= F2FS_NAME_LEN) {
1218 ASSERT_MSG("ino: 0x%x has i_namelen: 0x%x, "
1219 "but has %d characters for name",
1220 nid, namelen, child_d->i_namelen);
1222 FIX_MSG("[0x%x] i_namelen=0x%x -> 0x%x", nid, namelen,
1223 child_d->i_namelen);
1224 node_blk->i.i_namelen = cpu_to_le32(child_d->i_namelen);
1227 namelen = child_d->i_namelen;
1229 namelen = F2FS_NAME_LEN;
1231 pretty_print_filename(node_blk->i.i_name, namelen, en,
1232 file_enc_name(&node_blk->i));
1233 if (ftype == F2FS_FT_ORPHAN)
1234 DBG(1, "Orphan Inode: 0x%x [%s] i_blocks: %u\n\n",
1235 le32_to_cpu(node_blk->footer.ino),
1238 if (is_qf_ino(F2FS_RAW_SUPER(sbi), nid))
1239 DBG(1, "Quota Inode: 0x%x [%s] i_blocks: %u\n\n",
1240 le32_to_cpu(node_blk->footer.ino),
/* Directory-only checks: link count and the '.'/'..' dots flag. */
1243 if (ftype == F2FS_FT_DIR) {
1244 DBG(1, "Directory Inode: 0x%x [%s] depth: %d has %d files\n\n",
1245 le32_to_cpu(node_blk->footer.ino), en,
1246 le32_to_cpu(node_blk->i.i_current_depth),
1249 if (i_links != child.links) {
1250 ASSERT_MSG("ino: 0x%x i_links: %u, real links: %u",
1251 nid, i_links, child.links);
1253 node_blk->i.i_links = cpu_to_le32(child.links);
1255 FIX_MSG("Dir: 0x%x i_links= 0x%x -> 0x%x",
1256 nid, i_links, child.links);
1259 if (child.dots < 2 &&
1260 !(node_blk->i.i_inline & F2FS_INLINE_DOTS)) {
1261 ASSERT_MSG("ino: 0x%x dots: %u",
1264 node_blk->i.i_inline |= F2FS_INLINE_DOTS;
1266 FIX_MSG("Dir: 0x%x set inline_dots", nid);
1271 i_gc_failures = le16_to_cpu(node_blk->i.i_gc_failures);
1274 * old kernel initialized i_gc_failures as 0x01, in preen mode 2,
1275 * let's skip repairing.
1277 if (ftype == F2FS_FT_REG_FILE && i_gc_failures &&
1278 (c.preen_mode != PREEN_MODE_2 || i_gc_failures != 0x01)) {
1280 DBG(1, "Regular Inode: 0x%x [%s] depth: %d\n\n",
1281 le32_to_cpu(node_blk->footer.ino), en,
1285 node_blk->i.i_gc_failures = cpu_to_le16(0);
1287 FIX_MSG("Regular: 0x%x reset i_gc_failures from 0x%x to 0x00",
1288 nid, i_gc_failures);
/* Symlink with the minimal block count but zero size: restore size. */
1294 if (ftype == F2FS_FT_SYMLINK && i_size == 0 &&
1295 i_blocks == (i_xattr_nid ? 3 : 2)) {
1296 node_blk->i.i_size = cpu_to_le64(F2FS_BLKSIZE);
1298 FIX_MSG("Symlink: recover 0x%x with i_size=%lu",
1299 nid, (unsigned long)F2FS_BLKSIZE);
1302 if (ftype == F2FS_FT_ORPHAN && i_links) {
1303 ASSERT_MSG("ino: 0x%x is orphan inode, but has i_links: %u",
1306 node_blk->i.i_links = 0;
1308 FIX_MSG("ino: 0x%x orphan_inode, i_links= 0x%x -> 0",
1313 /* drop extent information to avoid potential wrong access */
1314 if (need_fix && f2fs_dev_is_writable())
1315 node_blk->i.i_ext.len = 0;
/* Recompute and repair the inode checksum when the feature is on. */
1317 if ((c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) &&
1318 f2fs_has_extra_isize(&node_blk->i)) {
1319 __u32 provided, calculated;
1321 provided = le32_to_cpu(node_blk->i.i_inode_checksum);
1322 calculated = f2fs_inode_chksum(node_blk);
1324 if (provided != calculated) {
1325 ASSERT_MSG("ino: 0x%x chksum:0x%x, but calculated one is: 0x%x",
1326 nid, provided, calculated);
1328 node_blk->i.i_inode_checksum =
1329 cpu_to_le32(calculated);
1331 FIX_MSG("ino: 0x%x recover, i_inode_checksum= 0x%x -> 0x%x",
1332 nid, provided, calculated);
1337 if (need_fix && f2fs_dev_is_writable()) {
1338 ret = dev_write_block(node_blk, ni->blk_addr);
/*
 * Check every data address in direct node @node_blk, mirroring the
 * inode's direct-address walk: per-block extent validation, compressed
 * cluster-header handling, NEW_ADDR-in-cluster skip, and per-block
 * fsck_chk_data_blk(); bad addresses are zeroed when fixing, and the
 * node block is written back if modified.
 * NOTE(review): several brace/continue lines, the 'need_fix'/'ret'/'idx'
 * declarations and the return statement are missing from this
 * extraction.
 */
1343 int fsck_chk_dnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
1344 u32 nid, enum FILE_TYPE ftype, struct f2fs_node *node_blk,
1345 u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
1346 struct child_info *child, struct node_info *ni)
1351 child->pp_ino = le32_to_cpu(inode->i_pino);
1352 u32 i_flags = le32_to_cpu(inode->i_flags);
1353 bool compressed = i_flags & F2FS_COMPR_FL;
1354 bool compr_rel = inode->i_inline & F2FS_COMPRESS_RELEASED;
1355 u32 cluster_size = 1 << inode->i_log_cluster_size;
1357 for (idx = 0; idx < ADDRS_PER_BLOCK(inode); idx++, child->pgofs++) {
1358 block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);
1360 check_extent_info(child, blkaddr, 0);
1362 if (blkaddr == NULL_ADDR)
/* Cluster header only valid on compressed inode, cluster-aligned. */
1364 if (blkaddr == COMPRESS_ADDR) {
1365 if (!compressed || (child->pgofs &
1366 (cluster_size - 1)) != 0) {
1368 node_blk->dn.addr[idx] = NULL_ADDR;
1370 FIX_MSG("[0x%x] dn.addr[%d] = 0", nid,
1376 F2FS_FSCK(sbi)->chk.valid_blk_cnt++;
1377 *blk_cnt = *blk_cnt + 1;
1378 cbc->cheader_pgofs = child->pgofs;
/* NEW_ADDR within a live compressed cluster is expected; skip. */
1383 if (!compr_rel && blkaddr == NEW_ADDR && child->pgofs -
1384 cbc->cheader_pgofs < cluster_size)
1386 ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(inode),
1388 le64_to_cpu(inode->i_blocks) == *blk_cnt, ftype,
1389 nid, idx, ni->version,
1390 file_is_encrypt(inode));
1392 *blk_cnt = *blk_cnt + 1;
1393 if (cur_qtype != -1 && blkaddr != NEW_ADDR)
1394 qf_last_blkofs[cur_qtype] = child->pgofs;
1395 } else if (c.fix_on) {
1396 node_blk->dn.addr[idx] = NULL_ADDR;
1398 FIX_MSG("[0x%x] dn.addr[%d] = 0", nid, idx);
1401 if (need_fix && f2fs_dev_is_writable()) {
1402 ret = dev_write_block(node_blk, ni->blk_addr);
/*
 * Check an indirect node: recurse into each non-zero child nid as a
 * direct node, zeroing entries that come back -EINVAL (when fixing) and
 * advancing child->pgofs past each skipped direct-node span; writes the
 * node back if modified.
 * NOTE(review): the 'i' declaration, continue statements, closing braces
 * and the return are missing from this extraction.
 */
1408 int fsck_chk_idnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
1409 enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
1410 struct f2fs_compr_blk_cnt *cbc, struct child_info *child)
1412 int need_fix = 0, ret;
1415 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1417 for (i = 0; i < NIDS_PER_BLOCK; i++) {
1418 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1420 ret = fsck_chk_node_blk(sbi, inode,
1421 le32_to_cpu(node_blk->in.nid[i]),
1422 ftype, TYPE_DIRECT_NODE, blk_cnt,
1425 *blk_cnt = *blk_cnt + 1;
1426 else if (ret == -EINVAL) {
1428 printf("should delete in.nid[i] = 0;\n");
1430 node_blk->in.nid[i] = 0;
1432 FIX_MSG("Set indirect node 0x%x -> 0", i);
/* Skipped subtree still consumes one direct node's worth of offsets. */
1435 child->pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1439 if (need_fix && f2fs_dev_is_writable()) {
1440 struct node_info ni;
1441 nid_t nid = le32_to_cpu(node_blk->footer.nid);
1443 get_node_info(sbi, nid, &ni);
1444 ret = dev_write_block(node_blk, ni.blk_addr);
1451 int fsck_chk_didnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
1452 enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
1453 struct f2fs_compr_blk_cnt *cbc, struct child_info *child)
1456 int need_fix = 0, ret = 0;
1458 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1460 for (i = 0; i < NIDS_PER_BLOCK; i++) {
1461 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1463 ret = fsck_chk_node_blk(sbi, inode,
1464 le32_to_cpu(node_blk->in.nid[i]),
1465 ftype, TYPE_INDIRECT_NODE, blk_cnt, cbc, child);
1467 *blk_cnt = *blk_cnt + 1;
1468 else if (ret == -EINVAL) {
1470 printf("should delete in.nid[i] = 0;\n");
1472 node_blk->in.nid[i] = 0;
1474 FIX_MSG("Set double indirect node 0x%x -> 0", i);
1477 child->pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1482 if (need_fix && f2fs_dev_is_writable()) {
1483 struct node_info ni;
1484 nid_t nid = le32_to_cpu(node_blk->footer.nid);
1486 get_node_info(sbi, nid, &ni);
1487 ret = dev_write_block(node_blk, ni.blk_addr);
1494 static const char *lookup_table =
1495 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
1500 * Encodes the input string using characters from the set [A-Za-z0-9+,].
1501 * The encoded string is roughly 4/3 times the size of the input string.
1503 static int base64_encode(const u8 *src, int len, char *dst)
1505 int i, bits = 0, ac = 0;
1508 for (i = 0; i < len; i++) {
1509 ac += src[i] << bits;
1512 *cp++ = lookup_table[ac & 0x3f];
1515 } while (bits >= 6);
1518 *cp++ = lookup_table[ac & 0x3f];
1522 void pretty_print_filename(const u8 *raw_name, u32 len,
1523 char out[F2FS_PRINT_NAMELEN], int enc_name)
1525 len = min(len, (u32)F2FS_NAME_LEN);
1528 len = base64_encode(raw_name, len, out);
1530 memcpy(out, raw_name, len);
1534 static void print_dentry(struct f2fs_sb_info *sbi, __u8 *name,
1535 u8 *bitmap, struct f2fs_dir_entry *dentry,
1536 int max, int idx, int last_blk, int enc_name)
1538 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1539 u32 depth = fsck->dentry_depth;
1545 char new[F2FS_PRINT_NAMELEN];
1547 if (!c.show_dentry && !c.show_file_map)
1550 name_len = le16_to_cpu(dentry[idx].name_len);
1551 next_idx = idx + (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
1553 bit_offset = find_next_bit_le(bitmap, max, next_idx);
1554 if (bit_offset >= max && last_blk)
1557 if (tree_mark_size <= depth) {
1558 tree_mark_size *= 2;
1559 ASSERT(tree_mark_size != 0);
1560 tree_mark = realloc(tree_mark, tree_mark_size);
1561 ASSERT(tree_mark != NULL);
1565 tree_mark[depth] = '`';
1567 tree_mark[depth] = '|';
1569 if (tree_mark[depth - 1] == '`')
1570 tree_mark[depth - 1] = ' ';
1572 pretty_print_filename(name, name_len, new, enc_name);
1574 if (c.show_file_map) {
1575 struct f2fs_dentry *d = fsck->dentry;
1577 if (dentry[idx].file_type != F2FS_FT_REG_FILE)
1582 printf("/%s", d->name);
1586 if (dump_node(sbi, le32_to_cpu(dentry[idx].ino), 0))
1589 for (i = 1; i < depth; i++)
1590 printf("%c ", tree_mark[i]);
1592 printf("%c-- %s <ino = 0x%x>, <encrypted (%d)>\n",
1593 last_de ? '`' : '|',
1594 new, le32_to_cpu(dentry[idx].ino),
1599 static int f2fs_check_hash_code(int encoding, int casefolded,
1600 struct f2fs_dir_entry *dentry,
1601 const unsigned char *name, u32 len, int enc_name)
1603 /* Casefolded Encrypted names require a key to compute siphash */
1604 if (enc_name && casefolded)
1607 f2fs_hash_t hash_code = f2fs_dentry_hash(encoding, casefolded, name, len);
1608 /* fix hash_code made by old buggy code */
1609 if (dentry->hash_code != hash_code) {
1610 char new[F2FS_PRINT_NAMELEN];
1612 pretty_print_filename(name, len, new, enc_name);
1613 FIX_MSG("Mismatch hash_code for \"%s\" [%x:%x]",
1614 new, le32_to_cpu(dentry->hash_code),
1616 dentry->hash_code = cpu_to_le32(hash_code);
1623 static int __get_current_level(int dir_level, u32 pgofs)
1625 unsigned int bidx = 0;
1628 for (i = 0; i < MAX_DIR_HASH_DEPTH; i++) {
1629 bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
1636 static int f2fs_check_dirent_position(const struct f2fs_dir_entry *dentry,
1637 const char *printable_name,
1638 u32 pgofs, u8 dir_level, u32 pino)
1640 unsigned int nbucket, nblock;
1641 unsigned int bidx, end_block;
1644 level = __get_current_level(dir_level, pgofs);
1646 nbucket = dir_buckets(level, dir_level);
1647 nblock = bucket_blocks(level);
1649 bidx = dir_block_index(level, dir_level,
1650 le32_to_cpu(dentry->hash_code) % nbucket);
1651 end_block = bidx + nblock;
1653 if (pgofs >= bidx && pgofs < end_block)
1656 ASSERT_MSG("Wrong position of dirent pino:%u, name:%s, level:%d, "
1657 "dir_level:%d, pgofs:%u, correct range:[%u, %u]\n",
1658 pino, printable_name, level, dir_level, pgofs, bidx,
1663 static int __chk_dots_dentries(struct f2fs_sb_info *sbi,
1665 struct f2fs_dir_entry *dentry,
1666 struct child_info *child,
1668 __u8 (*filename)[F2FS_SLOT_LEN],
1673 if ((name[0] == '.' && len == 1)) {
1674 if (le32_to_cpu(dentry->ino) != child->p_ino) {
1675 ASSERT_MSG("Bad inode number[0x%x] for '.', parent_ino is [0x%x]\n",
1676 le32_to_cpu(dentry->ino), child->p_ino);
1677 dentry->ino = cpu_to_le32(child->p_ino);
1682 if (name[0] == '.' && name[1] == '.' && len == 2) {
1683 if (child->p_ino == F2FS_ROOT_INO(sbi)) {
1684 if (le32_to_cpu(dentry->ino) != F2FS_ROOT_INO(sbi)) {
1685 ASSERT_MSG("Bad inode number[0x%x] for '..'\n",
1686 le32_to_cpu(dentry->ino));
1687 dentry->ino = cpu_to_le32(F2FS_ROOT_INO(sbi));
1690 } else if (le32_to_cpu(dentry->ino) != child->pp_ino) {
1691 ASSERT_MSG("Bad inode number[0x%x] for '..', parent parent ino is [0x%x]\n",
1692 le32_to_cpu(dentry->ino), child->pp_ino);
1693 dentry->ino = cpu_to_le32(child->pp_ino);
1698 if (f2fs_check_hash_code(get_encoding(sbi), casefolded, dentry, name, len, enc_name))
1701 if (name[len] != '\0') {
1702 ASSERT_MSG("'.' is not NULL terminated\n");
1704 memcpy(*filename, name, len);
1710 static void nullify_dentry(struct f2fs_dir_entry *dentry, int offs,
1711 __u8 (*filename)[F2FS_SLOT_LEN], u8 **bitmap)
1713 memset(dentry, 0, sizeof(struct f2fs_dir_entry));
1714 test_and_clear_bit_le(offs, *bitmap);
1715 memset(*filename, 0, F2FS_SLOT_LEN);
1718 static int __chk_dentries(struct f2fs_sb_info *sbi, int casefolded,
1719 struct child_info *child,
1720 u8 *bitmap, struct f2fs_dir_entry *dentry,
1721 __u8 (*filenames)[F2FS_SLOT_LEN],
1722 int max, int last_blk, int enc_name)
1724 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1725 enum FILE_TYPE ftype;
1728 struct f2fs_compr_blk_cnt cbc;
1730 char en[F2FS_PRINT_NAMELEN];
1736 /* readahead inode blocks */
1737 for (i = 0; i < max; i++) {
1740 if (test_bit_le(i, bitmap) == 0)
1743 ino = le32_to_cpu(dentry[i].ino);
1745 if (IS_VALID_NID(sbi, ino)) {
1746 struct node_info ni;
1748 get_node_info(sbi, ino, &ni);
1749 if (f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1751 dev_reada_block(ni.blk_addr);
1752 name_len = le16_to_cpu(dentry[i].name_len);
1754 i += (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN - 1;
1759 for (i = 0; i < max;) {
1760 if (test_bit_le(i, bitmap) == 0) {
1764 if (!IS_VALID_NID(sbi, le32_to_cpu(dentry[i].ino))) {
1765 ASSERT_MSG("Bad dentry 0x%x with invalid NID/ino 0x%x",
1766 i, le32_to_cpu(dentry[i].ino));
1768 FIX_MSG("Clear bad dentry 0x%x with bad ino 0x%x",
1769 i, le32_to_cpu(dentry[i].ino));
1770 test_and_clear_bit_le(i, bitmap);
1777 ftype = dentry[i].file_type;
1778 if ((ftype <= F2FS_FT_UNKNOWN || ftype > F2FS_FT_LAST_FILE_TYPE)) {
1779 ASSERT_MSG("Bad dentry 0x%x with unexpected ftype 0x%x",
1780 le32_to_cpu(dentry[i].ino), ftype);
1782 FIX_MSG("Clear bad dentry 0x%x with bad ftype 0x%x",
1784 test_and_clear_bit_le(i, bitmap);
1791 name_len = le16_to_cpu(dentry[i].name_len);
1793 if (name_len == 0 || name_len > F2FS_NAME_LEN) {
1794 ASSERT_MSG("Bad dentry 0x%x with invalid name_len", i);
1796 FIX_MSG("Clear bad dentry 0x%x", i);
1797 test_and_clear_bit_le(i, bitmap);
1803 name = calloc(name_len + 1, 1);
1806 memcpy(name, filenames[i], name_len);
1807 slots = (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
1809 /* Becareful. 'dentry.file_type' is not imode. */
1810 if (ftype == F2FS_FT_DIR) {
1811 if ((name[0] == '.' && name_len == 1) ||
1812 (name[0] == '.' && name[1] == '.' &&
1814 ret = __chk_dots_dentries(sbi, casefolded, &dentry[i],
1815 child, name, name_len, &filenames[i],
1826 if (child->dots > 2) {
1827 ASSERT_MSG("More than one '.' or '..', should delete the extra one\n");
1828 nullify_dentry(&dentry[i], i,
1829 &filenames[i], &bitmap);
1840 if (f2fs_check_hash_code(get_encoding(sbi), casefolded, dentry + i, name, name_len, enc_name))
1843 pretty_print_filename(name, name_len, en, enc_name);
1845 if (max == NR_DENTRY_IN_BLOCK) {
1846 ret = f2fs_check_dirent_position(dentry + i, en,
1847 child->pgofs, child->dir_level,
1851 FIX_MSG("Clear bad dentry 0x%x", i);
1852 test_and_clear_bit_le(i, bitmap);
1861 DBG(1, "[%3u]-[0x%x] name[%s] len[0x%x] ino[0x%x] type[0x%x]\n",
1862 fsck->dentry_depth, i, en, name_len,
1863 le32_to_cpu(dentry[i].ino),
1864 dentry[i].file_type);
1866 print_dentry(sbi, name, bitmap,
1867 dentry, max, i, last_blk, enc_name);
1871 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
1872 child->i_namelen = name_len;
1873 ret = fsck_chk_node_blk(sbi,
1874 NULL, le32_to_cpu(dentry[i].ino),
1875 ftype, TYPE_INODE, &blk_cnt, &cbc, child);
1877 if (ret && c.fix_on) {
1880 for (j = 0; j < slots; j++)
1881 test_and_clear_bit_le(i + j, bitmap);
1882 FIX_MSG("Unlink [0x%x] - %s len[0x%x], type[0x%x]",
1883 le32_to_cpu(dentry[i].ino),
1885 dentry[i].file_type);
1887 } else if (ret == 0) {
1888 if (ftype == F2FS_FT_DIR)
1897 return fixed ? -1 : dentries;
1900 int fsck_chk_inline_dentries(struct f2fs_sb_info *sbi,
1901 struct f2fs_node *node_blk, struct child_info *child)
1903 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1904 struct f2fs_dentry *cur_dentry = fsck->dentry_end;
1905 struct f2fs_dentry *new_dentry;
1906 struct f2fs_dentry_ptr d;
1907 void *inline_dentry;
1910 inline_dentry = inline_data_addr(node_blk);
1911 ASSERT(inline_dentry != NULL);
1913 make_dentry_ptr(&d, node_blk, inline_dentry, 2);
1915 fsck->dentry_depth++;
1916 new_dentry = calloc(sizeof(struct f2fs_dentry), 1);
1917 ASSERT(new_dentry != NULL);
1919 new_dentry->depth = fsck->dentry_depth;
1920 memcpy(new_dentry->name, child->p_name, F2FS_NAME_LEN);
1921 cur_dentry->next = new_dentry;
1922 fsck->dentry_end = new_dentry;
1924 dentries = __chk_dentries(sbi, IS_CASEFOLDED(&node_blk->i), child,
1925 d.bitmap, d.dentry, d.filename, d.max, 1,
1926 file_is_encrypt(&node_blk->i));// pass through
1928 DBG(1, "[%3d] Inline Dentry Block Fixed hash_codes\n\n",
1929 fsck->dentry_depth);
1931 DBG(1, "[%3d] Inline Dentry Block Done : "
1932 "dentries:%d in %d slots (len:%d)\n\n",
1933 fsck->dentry_depth, dentries,
1934 d.max, F2FS_NAME_LEN);
1936 fsck->dentry = cur_dentry;
1937 fsck->dentry_end = cur_dentry;
1938 cur_dentry->next = NULL;
1940 fsck->dentry_depth--;
1944 int fsck_chk_dentry_blk(struct f2fs_sb_info *sbi, int casefolded, u32 blk_addr,
1945 struct child_info *child, int last_blk, int enc_name)
1947 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1948 struct f2fs_dentry_block *de_blk;
1949 struct f2fs_dentry *cur_dentry = fsck->dentry_end;
1950 struct f2fs_dentry *new_dentry;
1953 de_blk = (struct f2fs_dentry_block *)calloc(BLOCK_SZ, 1);
1954 ASSERT(de_blk != NULL);
1956 ret = dev_read_block(de_blk, blk_addr);
1959 fsck->dentry_depth++;
1960 new_dentry = calloc(sizeof(struct f2fs_dentry), 1);
1961 ASSERT(new_dentry != NULL);
1962 new_dentry->depth = fsck->dentry_depth;
1963 memcpy(new_dentry->name, child->p_name, F2FS_NAME_LEN);
1964 cur_dentry->next = new_dentry;
1965 fsck->dentry_end = new_dentry;
1967 dentries = __chk_dentries(sbi, casefolded, child,
1968 de_blk->dentry_bitmap,
1969 de_blk->dentry, de_blk->filename,
1970 NR_DENTRY_IN_BLOCK, last_blk, enc_name);
1972 if (dentries < 0 && f2fs_dev_is_writable()) {
1973 ret = dev_write_block(de_blk, blk_addr);
1975 DBG(1, "[%3d] Dentry Block [0x%x] Fixed hash_codes\n\n",
1976 fsck->dentry_depth, blk_addr);
1978 DBG(1, "[%3d] Dentry Block [0x%x] Done : "
1979 "dentries:%d in %d slots (len:%d)\n\n",
1980 fsck->dentry_depth, blk_addr, dentries,
1981 NR_DENTRY_IN_BLOCK, F2FS_NAME_LEN);
1983 fsck->dentry = cur_dentry;
1984 fsck->dentry_end = cur_dentry;
1985 cur_dentry->next = NULL;
1987 fsck->dentry_depth--;
1992 int fsck_chk_data_blk(struct f2fs_sb_info *sbi, int casefolded,
1993 u32 blk_addr, struct child_info *child, int last_blk,
1994 enum FILE_TYPE ftype, u32 parent_nid, u16 idx_in_node, u8 ver,
1997 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1999 /* Is it reserved block? */
2000 if (blk_addr == NEW_ADDR) {
2001 fsck->chk.valid_blk_cnt++;
2005 if (!f2fs_is_valid_blkaddr(sbi, blk_addr, DATA_GENERIC)) {
2006 ASSERT_MSG("blkaddress is not valid. [0x%x]", blk_addr);
2010 if (is_valid_ssa_data_blk(sbi, blk_addr, parent_nid,
2011 idx_in_node, ver)) {
2012 ASSERT_MSG("summary data block is not valid. [0x%x]",
2017 if (f2fs_test_sit_bitmap(sbi, blk_addr) == 0)
2018 ASSERT_MSG("SIT bitmap is 0x0. blk_addr[0x%x]", blk_addr);
2020 if (f2fs_test_main_bitmap(sbi, blk_addr) != 0)
2021 ASSERT_MSG("Duplicated data [0x%x]. pnid[0x%x] idx[0x%x]",
2022 blk_addr, parent_nid, idx_in_node);
2024 fsck->chk.valid_blk_cnt++;
2026 if (ftype == F2FS_FT_DIR) {
2027 f2fs_set_main_bitmap(sbi, blk_addr, CURSEG_HOT_DATA);
2028 return fsck_chk_dentry_blk(sbi, casefolded, blk_addr, child,
2029 last_blk, enc_name);
2031 f2fs_set_main_bitmap(sbi, blk_addr, CURSEG_WARM_DATA);
2036 int fsck_chk_orphan_node(struct f2fs_sb_info *sbi)
2039 struct f2fs_compr_blk_cnt cbc = {0, CHEADER_PGOFS_NONE};
2040 block_t start_blk, orphan_blkaddr, i, j;
2041 struct f2fs_orphan_block *orphan_blk, *new_blk;
2042 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2045 if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
2048 start_blk = __start_cp_addr(sbi) + 1 + get_sb(cp_payload);
2049 orphan_blkaddr = __start_sum_addr(sbi) - 1 - get_sb(cp_payload);
2051 f2fs_ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
2053 orphan_blk = calloc(BLOCK_SZ, 1);
2056 new_blk = calloc(BLOCK_SZ, 1);
2059 for (i = 0; i < orphan_blkaddr; i++) {
2060 int ret = dev_read_block(orphan_blk, start_blk + i);
2061 u32 new_entry_count = 0;
2064 entry_count = le32_to_cpu(orphan_blk->entry_count);
2066 for (j = 0; j < entry_count; j++) {
2067 nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
2068 DBG(1, "[%3d] ino [0x%x]\n", i, ino);
2069 struct node_info ni;
2072 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
2074 if (c.preen_mode == PREEN_MODE_1 && !c.fix_on) {
2075 get_node_info(sbi, ino, &ni);
2076 if (!IS_VALID_NID(sbi, ino) ||
2077 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
2087 ret = fsck_chk_node_blk(sbi, NULL, ino,
2088 F2FS_FT_ORPHAN, TYPE_INODE, &blk_cnt,
2091 new_blk->ino[new_entry_count++] =
2093 else if (ret && c.fix_on)
2094 FIX_MSG("[0x%x] remove from orphan list", ino);
2096 ASSERT_MSG("[0x%x] wrong orphan inode", ino);
2098 if (f2fs_dev_is_writable() && c.fix_on &&
2099 entry_count != new_entry_count) {
2100 new_blk->entry_count = cpu_to_le32(new_entry_count);
2101 ret = dev_write_block(new_blk, start_blk + i);
2104 memset(orphan_blk, 0, BLOCK_SZ);
2105 memset(new_blk, 0, BLOCK_SZ);
2113 int fsck_chk_quota_node(struct f2fs_sb_info *sbi)
2115 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2116 enum quota_type qtype;
2119 struct f2fs_compr_blk_cnt cbc = {0, CHEADER_PGOFS_NONE};
2121 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
2123 if (sb->qf_ino[qtype] == 0)
2125 nid_t ino = QUOTA_INO(sb, qtype);
2126 struct node_info ni;
2128 DBG(1, "qtype [%d] ino [0x%x]\n", qtype, ino);
2131 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
2133 if (c.preen_mode == PREEN_MODE_1 && !c.fix_on) {
2134 get_node_info(sbi, ino, &ni);
2135 if (!IS_VALID_NID(sbi, ino) ||
2136 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
2141 ret = fsck_chk_node_blk(sbi, NULL, ino,
2142 F2FS_FT_REG_FILE, TYPE_INODE, &blk_cnt,
2145 ASSERT_MSG("wrong quota inode, qtype [%d] ino [0x%x]",
2147 qf_szchk_type[qtype] = QF_SZCHK_ERR;
2149 f2fs_rebuild_qf_inode(sbi, qtype);
2156 int fsck_chk_quota_files(struct f2fs_sb_info *sbi)
2158 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2159 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2160 enum quota_type qtype;
2165 /* Return if quota feature is disabled */
2169 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
2170 ino = sb->qf_ino[qtype];
2174 DBG(1, "Checking Quota file ([%3d] ino [0x%x])\n", qtype, ino);
2176 ret = quota_compare_and_update(sbi, qtype, &needs_writeout,
2178 if (ret == 0 && needs_writeout == 0) {
2183 /* Something is wrong */
2185 DBG(0, "Fixing Quota file ([%3d] ino [0x%x])\n",
2187 f2fs_filesize_update(sbi, ino, 0);
2188 ret = quota_write_inode(sbi, qtype);
2190 c.quota_fixed = true;
2193 ASSERT_MSG("Unable to write quota file");
2196 ASSERT_MSG("Quota file is missing or invalid"
2197 " quota file content found.");
2203 int fsck_chk_meta(struct f2fs_sb_info *sbi)
2205 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2206 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2207 struct seg_entry *se;
2208 unsigned int sit_valid_segs = 0, sit_node_blks = 0;
2211 /* 1. check sit usage with CP: curseg is lost? */
2212 for (i = 0; i < MAIN_SEGS(sbi); i++) {
2213 se = get_seg_entry(sbi, i);
2214 if (se->valid_blocks != 0)
2216 else if (IS_CUR_SEGNO(sbi, i)) {
2217 /* curseg has not been written back to device */
2218 MSG(1, "\tInfo: curseg %u is counted in valid segs\n", i);
2221 if (IS_NODESEG(se->type))
2222 sit_node_blks += se->valid_blocks;
2224 if (fsck->chk.sit_free_segs + sit_valid_segs !=
2225 get_usable_seg_count(sbi)) {
2226 ASSERT_MSG("SIT usage does not match: sit_free_segs %u, "
2227 "sit_valid_segs %u, total_segs %u",
2228 fsck->chk.sit_free_segs, sit_valid_segs,
2229 get_usable_seg_count(sbi));
2233 /* 2. check node count */
2234 if (fsck->chk.valid_nat_entry_cnt != sit_node_blks) {
2235 ASSERT_MSG("node count does not match: valid_nat_entry_cnt %u,"
2236 " sit_node_blks %u",
2237 fsck->chk.valid_nat_entry_cnt, sit_node_blks);
2241 /* 3. check SIT with CP */
2242 if (fsck->chk.sit_free_segs != le32_to_cpu(cp->free_segment_count)) {
2243 ASSERT_MSG("free segs does not match: sit_free_segs %u, "
2244 "free_segment_count %u",
2245 fsck->chk.sit_free_segs,
2246 le32_to_cpu(cp->free_segment_count));
2250 /* 4. check NAT with CP */
2251 if (fsck->chk.valid_nat_entry_cnt !=
2252 le32_to_cpu(cp->valid_node_count)) {
2253 ASSERT_MSG("valid node does not match: valid_nat_entry_cnt %u,"
2254 " valid_node_count %u",
2255 fsck->chk.valid_nat_entry_cnt,
2256 le32_to_cpu(cp->valid_node_count));
2260 /* 4. check orphan inode simply */
2261 if (fsck_chk_orphan_node(sbi))
2264 /* 5. check nat entry -- must be done before quota check */
2265 for (i = 0; i < fsck->nr_nat_entries; i++) {
2266 u32 blk = le32_to_cpu(fsck->entries[i].block_addr);
2267 nid_t ino = le32_to_cpu(fsck->entries[i].ino);
2271 * skip entry whose ino is 0, otherwise, we will
2272 * get a negative number by BLKOFF_FROM_MAIN(sbi, blk)
2276 if (!f2fs_is_valid_blkaddr(sbi, blk, DATA_GENERIC)) {
2277 MSG(0, "\tError: nat entry[ino %u block_addr 0x%x]"
2283 if (!f2fs_test_sit_bitmap(sbi, blk)) {
2284 MSG(0, "\tError: nat entry[ino %u block_addr 0x%x]"
2285 " not find it in sit_area_bitmap\n",
2290 if (!IS_VALID_NID(sbi, ino)) {
2291 MSG(0, "\tError: nat_entry->ino %u exceeds the range"
2292 " of nat entries %u\n",
2293 ino, fsck->nr_nat_entries);
2297 if (!f2fs_test_bit(ino, fsck->nat_area_bitmap)) {
2298 MSG(0, "\tError: nat_entry->ino %u is not set in"
2299 " nat_area_bitmap\n", ino);
2304 /* 6. check quota inode simply */
2305 if (fsck_chk_quota_node(sbi))
2308 if (fsck->nat_valid_inode_cnt != le32_to_cpu(cp->valid_inode_count)) {
2309 ASSERT_MSG("valid inode does not match: nat_valid_inode_cnt %u,"
2310 " valid_inode_count %u",
2311 fsck->nat_valid_inode_cnt,
2312 le32_to_cpu(cp->valid_inode_count));
2319 void fsck_chk_checkpoint(struct f2fs_sb_info *sbi)
2321 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2323 if (get_cp(ckpt_flags) & CP_LARGE_NAT_BITMAP_FLAG) {
2324 if (get_cp(checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
2325 ASSERT_MSG("Deprecated layout of large_nat_bitmap, "
2326 "chksum_offset:%u", get_cp(checksum_offset));
2332 void fsck_init(struct f2fs_sb_info *sbi)
2334 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2335 struct f2fs_sm_info *sm_i = SM_I(sbi);
2338 * We build three bitmap for main/sit/nat so that may check consistency
2340 * 1. main_area_bitmap will be used to check whether all blocks of main
2341 * area is used or not.
2342 * 2. nat_area_bitmap has bitmap information of used nid in NAT.
2343 * 3. sit_area_bitmap has bitmap information of used main block.
2344 * At Last sequence, we compare main_area_bitmap with sit_area_bitmap.
2346 fsck->nr_main_blks = sm_i->main_segments << sbi->log_blocks_per_seg;
2347 fsck->main_area_bitmap_sz = (fsck->nr_main_blks + 7) / 8;
2348 fsck->main_area_bitmap = calloc(fsck->main_area_bitmap_sz, 1);
2349 ASSERT(fsck->main_area_bitmap != NULL);
2351 build_nat_area_bitmap(sbi);
2353 build_sit_area_bitmap(sbi);
2355 ASSERT(tree_mark_size != 0);
2356 tree_mark = calloc(tree_mark_size, 1);
2357 ASSERT(tree_mark != NULL);
2358 fsck->dentry = calloc(sizeof(struct f2fs_dentry), 1);
2359 ASSERT(fsck->dentry != NULL);
2360 memcpy(fsck->dentry->name, "/", 1);
2361 fsck->dentry_end = fsck->dentry;
2363 c.quota_fixed = false;
2366 static void fix_hard_links(struct f2fs_sb_info *sbi)
2368 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2369 struct hard_link_node *tmp, *node;
2370 struct f2fs_node *node_blk = NULL;
2371 struct node_info ni;
2374 if (fsck->hard_link_list_head == NULL)
2377 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
2378 ASSERT(node_blk != NULL);
2380 node = fsck->hard_link_list_head;
2383 if (sanity_check_nid(sbi, node->nid, node_blk,
2384 F2FS_FT_MAX, TYPE_INODE, &ni))
2385 FIX_MSG("Failed to fix, rerun fsck.f2fs");
2387 node_blk->i.i_links = cpu_to_le32(node->actual_links);
2389 FIX_MSG("File: 0x%x i_links= 0x%x -> 0x%x",
2390 node->nid, node->links, node->actual_links);
2392 ret = dev_write_block(node_blk, ni.blk_addr);
2401 static void fix_nat_entries(struct f2fs_sb_info *sbi)
2403 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2406 for (i = 0; i < fsck->nr_nat_entries; i++)
2407 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
2408 nullify_nat_entry(sbi, i);
2411 static void flush_curseg_sit_entries(struct f2fs_sb_info *sbi)
2413 struct sit_info *sit_i = SIT_I(sbi);
2414 struct f2fs_sit_block *sit_blk;
2417 sit_blk = calloc(BLOCK_SZ, 1);
2419 /* update curseg sit entries, since we may change
2420 * a segment type in move_curseg_info
2422 for (i = 0; i < NO_CHECK_TYPE; i++) {
2423 struct curseg_info *curseg = CURSEG_I(sbi, i);
2424 struct f2fs_sit_entry *sit;
2425 struct seg_entry *se;
2427 se = get_seg_entry(sbi, curseg->segno);
2428 get_current_sit_page(sbi, curseg->segno, sit_blk);
2429 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, curseg->segno)];
2430 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2432 rewrite_current_sit_page(sbi, curseg->segno, sit_blk);
2438 static void fix_checksum(struct f2fs_sb_info *sbi)
2440 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2441 struct f2fs_nm_info *nm_i = NM_I(sbi);
2442 struct sit_info *sit_i = SIT_I(sbi);
2443 void *bitmap_offset;
2448 bitmap_offset = cp->sit_nat_version_bitmap + sizeof(__le32);
2450 memcpy(bitmap_offset, nm_i->nat_bitmap, nm_i->bitmap_size);
2451 memcpy(bitmap_offset + nm_i->bitmap_size,
2452 sit_i->sit_bitmap, sit_i->bitmap_size);
2455 static void fix_checkpoint(struct f2fs_sb_info *sbi)
2457 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2458 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2459 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2460 unsigned long long cp_blk_no;
2461 u32 flags = c.alloc_failed ? CP_FSCK_FLAG: CP_UMOUNT_FLAG;
2462 block_t orphan_blks = 0;
2468 /* should call from fsck */
2469 ASSERT(c.func == FSCK);
2471 if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
2472 orphan_blks = __start_sum_addr(sbi) - 1;
2473 flags |= CP_ORPHAN_PRESENT_FLAG;
2475 if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
2476 flags |= CP_TRIMMED_FLAG;
2477 if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
2478 flags |= CP_DISABLED_FLAG;
2479 if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
2480 flags |= CP_LARGE_NAT_BITMAP_FLAG;
2481 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
2483 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
2486 if (flags & CP_UMOUNT_FLAG)
2491 set_cp(cp_pack_total_block_count, cp_blocks +
2492 orphan_blks + get_sb(cp_payload));
2494 flags = update_nat_bits_flags(sb, cp, flags);
2495 flags |= CP_NOCRC_RECOVERY_FLAG;
2496 set_cp(ckpt_flags, flags);
2498 set_cp(free_segment_count, get_free_segments(sbi));
2499 set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
2500 set_cp(valid_node_count, fsck->chk.valid_node_cnt);
2501 set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
2503 crc = f2fs_checkpoint_chksum(cp);
2504 *((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
2507 cp_blk_no = get_sb(cp_blkaddr);
2508 if (sbi->cur_cp == 2)
2509 cp_blk_no += 1 << get_sb(log_blocks_per_seg);
2511 ret = dev_write_block(cp, cp_blk_no++);
2514 for (i = 0; i < get_sb(cp_payload); i++) {
2515 ret = dev_write_block(((unsigned char *)cp) +
2516 (i + 1) * F2FS_BLKSIZE, cp_blk_no++);
2520 cp_blk_no += orphan_blks;
2522 for (i = 0; i < NO_CHECK_TYPE; i++) {
2523 struct curseg_info *curseg = CURSEG_I(sbi, i);
2525 if (!(flags & CP_UMOUNT_FLAG) && IS_NODESEG(i))
2528 ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
2532 /* Write nat bits */
2533 if (flags & CP_NAT_BITS_FLAG)
2534 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
2536 ret = f2fs_fsync_device();
2539 ret = dev_write_block(cp, cp_blk_no++);
2542 ret = f2fs_fsync_device();
2545 MSG(0, "Info: fix_checkpoint() cur_cp:%d\n", sbi->cur_cp);
2548 static void fix_checkpoints(struct f2fs_sb_info *sbi)
2550 /* copy valid checkpoint to its mirror position */
2551 duplicate_checkpoint(sbi);
2553 /* repair checkpoint at CP #0 position */
2555 fix_checkpoint(sbi);
2558 #ifdef HAVE_LINUX_BLKZONED_H
2561 * Refer valid block map and return offset of the last valid block in the zone.
2562 * Obtain valid block map from SIT and fsync data.
2563 * If there is no valid block in the zone, return -1.
2565 static int last_vblk_off_in_zone(struct f2fs_sb_info *sbi,
2566 unsigned int zone_segno)
2569 unsigned int segs_per_zone = sbi->segs_per_sec * sbi->secs_per_zone;
2570 struct seg_entry *se;
2572 for (s = segs_per_zone - 1; s >= 0; s--) {
2573 se = get_seg_entry(sbi, zone_segno + s);
2576 * Refer not cur_valid_map but ckpt_valid_map which reflects
2579 ASSERT(se->ckpt_valid_map);
2580 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
2581 if (f2fs_test_bit(b, (const char*)se->ckpt_valid_map))
2582 return b + (s << sbi->log_blocks_per_seg);
2588 static int check_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
2590 struct curseg_info *curseg = CURSEG_I(sbi, type);
2591 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2592 struct blk_zone blkz;
2593 block_t cs_block, wp_block, zone_last_vblock;
2594 uint64_t cs_sector, wp_sector;
2596 unsigned int zone_segno;
2597 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
2599 /* get the device the curseg points to */
2600 cs_block = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;
2601 for (i = 0; i < MAX_DEVICES; i++) {
2602 if (!c.devices[i].path)
2604 if (c.devices[i].start_blkaddr <= cs_block &&
2605 cs_block <= c.devices[i].end_blkaddr)
2609 if (i >= MAX_DEVICES)
2612 if (c.devices[i].zoned_model != F2FS_ZONED_HM)
2615 /* get write pointer position of the zone the curseg points to */
2616 cs_sector = (cs_block - c.devices[i].start_blkaddr)
2617 << log_sectors_per_block;
2618 ret = f2fs_report_zone(i, cs_sector, &blkz);
2622 if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
2625 /* check consistency between the curseg and the write pointer */
2626 wp_block = c.devices[i].start_blkaddr +
2627 (blk_zone_wp_sector(&blkz) >> log_sectors_per_block);
2628 wp_sector = blk_zone_wp_sector(&blkz);
2630 if (cs_sector == wp_sector)
2633 if (cs_sector > wp_sector) {
2634 MSG(0, "Inconsistent write pointer with curseg %d: "
2635 "curseg %d[0x%x,0x%x] > wp[0x%x,0x%x]\n",
2636 type, type, curseg->segno, curseg->next_blkoff,
2637 GET_SEGNO(sbi, wp_block), OFFSET_IN_SEG(sbi, wp_block));
2638 fsck->chk.wp_inconsistent_zones++;
2642 MSG(0, "Write pointer goes advance from curseg %d: "
2643 "curseg %d[0x%x,0x%x] wp[0x%x,0x%x]\n",
2644 type, type, curseg->segno, curseg->next_blkoff,
2645 GET_SEGNO(sbi, wp_block), OFFSET_IN_SEG(sbi, wp_block));
2647 zone_segno = GET_SEG_FROM_SEC(sbi,
2648 GET_SEC_FROM_SEG(sbi, curseg->segno));
2649 zone_last_vblock = START_BLOCK(sbi, zone_segno) +
2650 last_vblk_off_in_zone(sbi, zone_segno);
2653 * If valid blocks exist between the curseg position and the write
2654 * pointer, they are fsync data. This is not an error to fix. Leave it
2655 * for kernel to recover later.
2656 * If valid blocks exist between the curseg's zone start and the curseg
2657 * position, or if there is no valid block in the curseg's zone, fix
2658 * the inconsistency between the curseg and the writ pointer.
2659 * Of Note is that if there is no valid block in the curseg's zone,
2660 * last_vblk_off_in_zone() returns -1 and zone_last_vblock is always
2661 * smaller than cs_block.
2663 if (cs_block <= zone_last_vblock && zone_last_vblock < wp_block) {
2664 MSG(0, "Curseg has fsync data: curseg %d[0x%x,0x%x] "
2665 "last valid block in zone[0x%x,0x%x]\n",
2666 type, curseg->segno, curseg->next_blkoff,
2667 GET_SEGNO(sbi, zone_last_vblock),
2668 OFFSET_IN_SEG(sbi, zone_last_vblock));
2672 fsck->chk.wp_inconsistent_zones++;
2678 static int check_curseg_write_pointer(struct f2fs_sb_info *UNUSED(sbi),
2686 int check_curseg_offset(struct f2fs_sb_info *sbi, int type)
2688 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2689 struct curseg_info *curseg = CURSEG_I(sbi, type);
2690 struct seg_entry *se;
2693 if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO) &&
2694 type != CURSEG_HOT_DATA && type != CURSEG_HOT_NODE)
2697 if ((curseg->next_blkoff >> 3) >= SIT_VBLOCK_MAP_SIZE) {
2698 ASSERT_MSG("Next block offset:%u is invalid, type:%d",
2699 curseg->next_blkoff, type);
2702 se = get_seg_entry(sbi, curseg->segno);
2703 if (f2fs_test_bit(curseg->next_blkoff,
2704 (const char *)se->cur_valid_map)) {
2705 ASSERT_MSG("Next block offset is not free, type:%d", type);
2708 if (curseg->alloc_type == SSR)
2711 nblocks = sbi->blocks_per_seg;
2712 for (j = curseg->next_blkoff + 1; j < nblocks; j++) {
2713 if (f2fs_test_bit(j, (const char *)se->cur_valid_map)) {
2714 ASSERT_MSG("For LFS curseg, space after .next_blkoff "
2715 "should be unused, type:%d", type);
2720 if (c.zoned_model == F2FS_ZONED_HM)
2721 return check_curseg_write_pointer(sbi, type);
2726 int check_curseg_offsets(struct f2fs_sb_info *sbi)
2730 for (i = 0; i < NO_CHECK_TYPE; i++) {
2731 ret = check_curseg_offset(sbi, i);
2738 static void fix_curseg_info(struct f2fs_sb_info *sbi)
2740 int i, need_update = 0;
2742 for (i = 0; i < NO_CHECK_TYPE; i++) {
2743 if (check_curseg_offset(sbi, i)) {
2744 update_curseg_info(sbi, i);
2750 write_curseg_info(sbi);
2751 flush_curseg_sit_entries(sbi);
/*
 * Scan every main-area segment and restore the SIT segment type where
 * it drifted from the original type.  The restore is only performed for
 * the visible COLD_DATA-related case; other mismatches are reported via
 * FIX_MSG.
 */
2755 int check_sit_types(struct f2fs_sb_info *sbi)
2760 for (i = 0; i < MAIN_SEGS(sbi); i++) {
2761 struct seg_entry *se;
2763 se = get_seg_entry(sbi, i);
2764 if (se->orig_type != se->type) {
2765 if (se->orig_type == CURSEG_COLD_DATA &&
2766 se->type <= CURSEG_COLD_DATA) {
/* put back the original (cold data) type recorded in SIT */
2767 se->type = se->orig_type;
2769 FIX_MSG("Wrong segment type [0x%x] %x -> %x",
2770 i, se->orig_type, se->type);
/*
 * Locate the lost+found directory (named LPF) under the root inode,
 * creating it via f2fs_mkdir() when absent.  On success the lost+found
 * inode block is read into a freshly calloc'd node buffer (ownership
 * passes to the caller) and c.lpf_ino is cached for later reconnects.
 */
2778 static struct f2fs_node *fsck_get_lpf(struct f2fs_sb_info *sbi)
2780 struct f2fs_node *node;
2781 struct node_info ni;
2785 /* read root inode first */
2786 node = calloc(F2FS_BLKSIZE, 1);
2788 get_node_info(sbi, F2FS_ROOT_INO(sbi), &ni);
2789 err = dev_read_block(node, ni.blk_addr);
2792 /* lookup lost+found in root directory */
2793 lpf_ino = f2fs_lookup(sbi, node, (u8 *)LPF, strlen(LPF));
2794 if (lpf_ino) { /* found */
2795 get_node_info(sbi, lpf_ino, &ni);
2796 err = dev_read_block(node, ni.blk_addr);
2798 DBG(1, "Found lost+found 0x%x at blkaddr [0x%x]\n",
2799 lpf_ino, ni.blk_addr);
/* an existing LPF entry that is not a directory is unusable */
2800 if (!S_ISDIR(le16_to_cpu(node->i.i_mode))) {
2801 ASSERT_MSG("lost+found is not directory [0%o]\n",
2802 le16_to_cpu(node->i.i_mode));
2803 /* FIXME: give up? */
2806 } else { /* not found, create it */
2809 memset(&de, 0, sizeof(de));
2810 de.name = (u8 *) LPF;
2811 de.len = strlen(LPF);
/* note: trailing commas (comma operator) below, not semicolons */
2813 de.pino = F2FS_ROOT_INO(sbi),
2814 de.file_type = F2FS_FT_DIR,
2817 de.mtime = time(NULL);
2819 err = f2fs_mkdir(sbi, &de);
2821 ASSERT_MSG("Failed create lost+found");
/* re-read the just-created inode so the caller gets a fresh copy */
2825 get_node_info(sbi, de.ino, &ni);
2826 err = dev_read_block(node, ni.blk_addr);
2828 DBG(1, "Create lost+found 0x%x at blkaddr [0x%x]\n",
2829 de.ino, ni.blk_addr);
/* remember lost+found's ino globally for the reconnect pass */
2832 c.lpf_ino = le32_to_cpu(node->footer.ino);
/*
 * Link the orphan inode @fnode into lost+found (@lpf) under a name that
 * is simply its inode number printed in decimal.  On success the
 * inode's i_name/i_namelen/i_pino are rewritten to match and the inode
 * block is written back to disk.
 */
2839 static int fsck_do_reconnect_file(struct f2fs_sb_info *sbi,
2840 struct f2fs_node *lpf,
2841 struct f2fs_node *fnode)
2845 nid_t ino = le32_to_cpu(fnode->footer.ino);
2846 struct node_info ni;
/* name buffer is 80 bytes; a u32 in decimal always fits */
2849 namelen = snprintf(name, 80, "%u", ino);
2851 /* ignore terminating '\0', should never happen */
/* a collision means something already sits under that name — bail out */
2854 if (f2fs_lookup(sbi, lpf, (u8 *)name, namelen)) {
2855 ASSERT_MSG("Name %s already exist in lost+found", name);
2859 get_node_info(sbi, le32_to_cpu(lpf->footer.ino), &ni);
2860 ftype = map_de_type(le16_to_cpu(fnode->i.i_mode));
2861 ret = f2fs_add_link(sbi, lpf, (unsigned char *)name, namelen,
2862 ino, ftype, ni.blk_addr, 0);
2864 ASSERT_MSG("Failed to add inode [0x%x] to lost+found", ino);
/* make the inode itself agree with its new parent/name */
2869 memcpy(fnode->i.i_name, name, namelen);
2870 fnode->i.i_namelen = cpu_to_le32(namelen);
2871 fnode->i.i_pino = c.lpf_ino;
2872 get_node_info(sbi, le32_to_cpu(fnode->footer.ino), &ni);
2873 ret = dev_write_block(fnode, ni.blk_addr);
2876 DBG(1, "Reconnect inode [0x%x] to lost+found\n", ino);
/*
 * Undo the fsck accounting for a direct-node block of a file whose
 * reconnect failed: decrement the node/block counters, clear the node
 * block's bit in main_area_bitmap, then do the same for every data
 * block address the node references (skipping NEW_ADDR placeholders).
 */
2880 static void fsck_failed_reconnect_file_dnode(struct f2fs_sb_info *sbi,
2883 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2884 struct f2fs_node *node;
2885 struct node_info ni;
2889 node = calloc(F2FS_BLKSIZE, 1);
2892 get_node_info(sbi, nid, &ni);
2893 err = dev_read_block(node, ni.blk_addr);
/* roll back the counters bumped while checking this node block */
2896 fsck->chk.valid_node_cnt--;
2897 fsck->chk.valid_blk_cnt--;
2898 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
2900 for (i = 0; i < ADDRS_PER_BLOCK(&node->i); i++) {
2901 addr = le32_to_cpu(node->dn.addr[i]);
2904 fsck->chk.valid_blk_cnt--;
/* NEW_ADDR marks a not-yet-allocated block: no bitmap bit to clear */
2905 if (addr == NEW_ADDR)
2907 f2fs_clear_main_bitmap(sbi, addr);
/*
 * Undo the fsck accounting for an indirect-node block: roll back its
 * own counters/bitmap bit, then recurse into every referenced direct
 * node via fsck_failed_reconnect_file_dnode().
 */
2913 static void fsck_failed_reconnect_file_idnode(struct f2fs_sb_info *sbi,
2916 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2917 struct f2fs_node *node;
2918 struct node_info ni;
2922 node = calloc(F2FS_BLKSIZE, 1);
2925 get_node_info(sbi, nid, &ni);
2926 err = dev_read_block(node, ni.blk_addr);
2929 fsck->chk.valid_node_cnt--;
2930 fsck->chk.valid_blk_cnt--;
2931 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
2933 for (i = 0; i < NIDS_PER_BLOCK; i++) {
2934 tmp = le32_to_cpu(node->in.nid[i]);
2937 fsck_failed_reconnect_file_dnode(sbi, tmp);
/*
 * Undo the fsck accounting for a double-indirect node block: roll back
 * its own counters/bitmap bit, then recurse into every referenced
 * indirect node via fsck_failed_reconnect_file_idnode().
 */
2943 static void fsck_failed_reconnect_file_didnode(struct f2fs_sb_info *sbi,
2946 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2947 struct f2fs_node *node;
2948 struct node_info ni;
2952 node = calloc(F2FS_BLKSIZE, 1);
2955 get_node_info(sbi, nid, &ni);
2956 err = dev_read_block(node, ni.blk_addr);
2959 fsck->chk.valid_node_cnt--;
2960 fsck->chk.valid_blk_cnt--;
2961 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
2963 for (i = 0; i < NIDS_PER_BLOCK; i++) {
2964 tmp = le32_to_cpu(node->in.nid[i]);
2967 fsck_failed_reconnect_file_idnode(sbi, tmp);
2974 * Counters and main_area_bitmap are already changed during checking
2975 * inode block, so clear them. There is no need to clear new blocks
2976 * allocated to lost+found.
/*
 * Roll back all fsck bookkeeping for inode @ino after a failed
 * reconnect: inode/node/block counters, the xattr node if present, the
 * inode's direct data addresses (unless inline), and the whole i_nid[]
 * tree via the dnode/idnode/didnode helpers.
 */
2978 static void fsck_failed_reconnect_file(struct f2fs_sb_info *sbi, nid_t ino)
2980 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2981 struct f2fs_node *node;
2982 struct node_info ni;
2986 node = calloc(F2FS_BLKSIZE, 1);
2989 get_node_info(sbi, ino, &ni);
2990 err = dev_read_block(node, ni.blk_addr);
2993 /* clear inode counters */
2994 fsck->chk.valid_inode_cnt--;
2995 fsck->chk.valid_node_cnt--;
2996 fsck->chk.valid_blk_cnt--;
2997 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
2999 /* clear xnid counters */
3000 if (node->i.i_xattr_nid) {
3001 nid = le32_to_cpu(node->i.i_xattr_nid);
3002 fsck->chk.valid_node_cnt--;
3003 fsck->chk.valid_blk_cnt--;
3004 get_node_info(sbi, nid, &ni);
3005 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
3008 /* clear data counters */
3009 if(!(node->i.i_inline & F2FS_INLINE_DATA)) {
/* skip the extra_isize words before the i_addr payload */
3010 ofs = get_extra_isize(node);
3011 for (i = 0; i < ADDRS_PER_INODE(&node->i); i++) {
3012 block_t addr = le32_to_cpu(node->i.i_addr[ofs + i]);
3015 fsck->chk.valid_blk_cnt--;
/* NEW_ADDR placeholders never got a main-area bitmap bit */
3016 if (addr == NEW_ADDR)
3018 f2fs_clear_main_bitmap(sbi, addr);
/* walk the five i_nid slots: 0-1 direct, 2-3 indirect, 4 double */
3022 for (i = 0; i < 5; i++) {
3023 nid = le32_to_cpu(node->i.i_nid[i]);
3028 case 0: /* direct node */
3030 fsck_failed_reconnect_file_dnode(sbi, nid);
3032 case 2: /* indirect node */
3034 fsck_failed_reconnect_file_idnode(sbi, nid);
3036 case 4: /* double indirect node */
3037 fsck_failed_reconnect_file_didnode(sbi, nid);
3046 * Scan unreachable nids and find only regular file inodes. If these files
3047 * are not corrupted, reconnect them to lost+found.
3049 * Since all unreachable nodes are already checked, we can allocate new
3052 * This function returns the number of files that have been reconnected.
/*
 * Two-pass reconnect of unreachable inodes to lost+found.
 * Pass 1: scan nat_area_bitmap for unreachable nids, skip quota inodes,
 * non-inode node blocks and directories, sanity-check the rest, run the
 * regular inode check on each candidate and mark it in a private
 * reconnect_bitmap.  Pass 2: link every marked inode into lost+found;
 * on failure the accounting is rolled back via
 * fsck_failed_reconnect_file().  Returns the number of files
 * reconnected (via cnt in elided lines).
 */
3054 static int fsck_reconnect_file(struct f2fs_sb_info *sbi)
3056 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3057 struct f2fs_node *lpf_node, *node;
3058 struct node_info ni;
3059 char *reconnect_bitmap;
3061 struct f2fs_compr_blk_cnt cbc;
3063 int err, cnt = 0, ftype;
3065 node = calloc(F2FS_BLKSIZE, 1);
/* private bitmap of candidates, same geometry as nat_area_bitmap */
3068 reconnect_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
3069 ASSERT(reconnect_bitmap);
3071 for (nid = 0; nid < fsck->nr_nat_entries; nid++) {
3072 if (f2fs_test_bit(nid, fsck->nat_area_bitmap)) {
3073 if (is_qf_ino(F2FS_RAW_SUPER(sbi), nid)) {
3074 DBG(1, "Not support quota inode [0x%x]\n",
3079 get_node_info(sbi, nid, &ni);
3080 err = dev_read_block(node, ni.blk_addr);
3083 /* reconnection will restore these nodes if needed */
3084 if (node->footer.ino != node->footer.nid) {
3085 DBG(1, "Not support non-inode node [0x%x]\n",
3090 if (S_ISDIR(le16_to_cpu(node->i.i_mode))) {
3091 DBG(1, "Not support directory inode [0x%x]\n",
3096 ftype = map_de_type(le16_to_cpu(node->i.i_mode));
3097 if (sanity_check_nid(sbi, nid, node, ftype,
3099 ASSERT_MSG("Invalid nid [0x%x]\n", nid);
3103 DBG(1, "Check inode 0x%x\n", nid);
3106 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
3107 fsck_chk_inode_blk(sbi, nid, ftype, node,
3108 &blk_cnt, &cbc, &ni, NULL);
3110 f2fs_set_bit(nid, reconnect_bitmap);
/* pass 2: fetch (or create) lost+found, then link each candidate */
3114 lpf_node = fsck_get_lpf(sbi);
3118 for (nid = 0; nid < fsck->nr_nat_entries; nid++) {
3119 if (f2fs_test_bit(nid, reconnect_bitmap)) {
3120 get_node_info(sbi, nid, &ni);
3121 err = dev_read_block(node, ni.blk_addr);
3124 if (fsck_do_reconnect_file(sbi, lpf_node, node)) {
3125 DBG(1, "Failed to reconnect inode [0x%x]\n",
3127 fsck_failed_reconnect_file(sbi, nid);
3131 quota_add_inode_usage(fsck->qctx, nid, &node->i);
3133 DBG(1, "Reconnected inode [0x%x] to lost+found\n", nid);
3141 free(reconnect_bitmap);
#ifdef HAVE_LINUX_BLKZONED_H

/*
 * Context passed through f2fs_report_zones() to the per-zone callback:
 * the sb info plus the device index being scanned (remaining fields in
 * elided lines).
 */
struct write_pointer_check_data {
3148 struct f2fs_sb_info *sbi;
/*
 * Per-zone callback for f2fs_report_zones(): compare a zone's hardware
 * write pointer against SIT state.  Zones owned by an active curseg are
 * skipped.  An empty zone whose write pointer is not at the zone start
 * is counted as inconsistent and (with fix enabled, in elided lines)
 * reset via f2fs_reset_zone(); valid blocks found beyond the write
 * pointer are only reported.
 */
3152 static int chk_and_fix_wp_with_sit(int UNUSED(i), void *blkzone, void *opaque)
3154 struct blk_zone *blkz = (struct blk_zone *)blkzone;
3155 struct write_pointer_check_data *wpd = opaque;
3156 struct f2fs_sb_info *sbi = wpd->sbi;
3157 struct device_info *dev = c.devices + wpd->dev_index;
3158 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3159 block_t zone_block, wp_block, wp_blkoff;
3160 unsigned int zone_segno, wp_segno;
3161 struct curseg_info *cs;
3162 int cs_index, ret, last_valid_blkoff;
/* converts 512B sector counts from the zone report into f2fs blocks */
3163 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
3164 unsigned int segs_per_zone = sbi->segs_per_sec * sbi->secs_per_zone;
/* conventional zones have no write pointer to check */
3166 if (blk_zone_conv(blkz))
3169 zone_block = dev->start_blkaddr
3170 + (blk_zone_sector(blkz) >> log_sectors_per_block);
3171 zone_segno = GET_SEGNO(sbi, zone_block);
3172 if (zone_segno >= MAIN_SEGS(sbi))
3175 wp_block = dev->start_blkaddr
3176 + (blk_zone_wp_sector(blkz) >> log_sectors_per_block);
3177 wp_segno = GET_SEGNO(sbi, wp_block);
3178 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
3180 /* if a curseg points to the zone, skip the check */
3181 for (cs_index = 0; cs_index < NO_CHECK_TYPE; cs_index++) {
3182 cs = &SM_I(sbi)->curseg_array[cs_index];
3183 if (zone_segno <= cs->segno &&
3184 cs->segno < zone_segno + segs_per_zone)
3188 last_valid_blkoff = last_vblk_off_in_zone(sbi, zone_segno);
3191 * When there is no valid block in the zone, check write pointer is
3192 * at zone start. If not, reset the write pointer.
3194 if (last_valid_blkoff < 0 &&
3195 blk_zone_wp_sector(blkz) != blk_zone_sector(blkz)) {
3197 MSG(0, "Inconsistent write pointer: wp[0x%x,0x%x]\n",
3198 wp_segno, wp_blkoff);
3199 fsck->chk.wp_inconsistent_zones++;
3203 FIX_MSG("Reset write pointer of zone at segment 0x%x",
3205 ret = f2fs_reset_zone(wpd->dev_index, blkz);
3207 printf("[FSCK] Write pointer reset failed: %s\n",
3211 fsck->chk.wp_fixed = 1;
3216 * If valid blocks exist in the zone beyond the write pointer, it
3217 * is a bug. No need to fix because the zone is not selected for the
3218 * write. Just report it.
3220 if (last_valid_blkoff + zone_block > wp_block) {
3221 MSG(0, "Unexpected invalid write pointer: wp[0x%x,0x%x]\n",
3222 wp_segno, wp_blkoff);
/*
 * Walk every configured host-managed zoned device and run
 * chk_and_fix_wp_with_sit() over its zone report to align hardware
 * write pointers with SIT state.  No-op for non-HM models.
 */
3229 static void fix_wp_sit_alignment(struct f2fs_sb_info *sbi)
3232 struct write_pointer_check_data wpd = { sbi, 0 };
3234 if (c.zoned_model != F2FS_ZONED_HM)
3237 for (i = 0; i < MAX_DEVICES; i++) {
/* empty path slot ends/skips the device table entry */
3238 if (!c.devices[i].path)
3240 if (c.devices[i].zoned_model != F2FS_ZONED_HM)
3244 if (f2fs_report_zones(i, chk_and_fix_wp_with_sit, &wpd)) {
3245 printf("[FSCK] Write pointer check failed: %s\n",
/*
 * NOTE(review): presumably the no-op stub for builds without
 * HAVE_LINUX_BLKZONED_H — confirm against the full source.
 */
3254 static void fix_wp_sit_alignment(struct f2fs_sb_info *UNUSED(sbi))
3262 * Check and fix consistency with write pointers at the beginning of
3263 * fsck so that following writes by fsck do not fail.
/*
 * Entry point run early in fsck on host-managed zoned images: repair
 * any inconsistent cursegs (marking wp_fixed) when fixing is enabled,
 * then align device write pointers with SIT via fix_wp_sit_alignment().
 */
3265 void fsck_chk_and_fix_write_pointers(struct f2fs_sb_info *sbi)
3267 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3269 if (c.zoned_model != F2FS_ZONED_HM)
3272 if (check_curseg_offsets(sbi) && c.fix_on) {
3273 fix_curseg_info(sbi);
3274 fsck->chk.wp_fixed = 1;
3277 fix_wp_sit_alignment(sbi);
/*
 * Cross-check each curseg against both SIT (segment type must equal the
 * log index) and SSA (summary footer must be data-type for data logs,
 * node-type for node logs).  With fix/preen enabled, mismatches are
 * corrected in place (SSA footer entry_type rewritten here; the SIT fix
 * lies in elided lines).
 */
3280 int fsck_chk_curseg_info(struct f2fs_sb_info *sbi)
3282 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3283 struct curseg_info *curseg;
3284 struct seg_entry *se;
3285 struct f2fs_summary_block *sum_blk;
3288 for (i = 0; i < NO_CHECK_TYPE; i++) {
3289 curseg = CURSEG_I(sbi, i);
3290 se = get_seg_entry(sbi, curseg->segno);
3291 sum_blk = curseg->sum_blk;
/* RO-feature images: only the HOT_DATA/HOT_NODE logs are active */
3293 if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
3294 (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE))
/* SIT type of the curseg's segment must match the log index */
3297 if (se->type != i) {
3298 ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
3299 "type(SIT) [%d]", i, curseg->segno,
3301 if (c.fix_on || c.preen_mode)
/* logs <= COLD_DATA are data logs; the rest are node logs */
3305 if (i <= CURSEG_COLD_DATA && IS_SUM_DATA_SEG(sum_blk->footer)) {
3307 } else if (i > CURSEG_COLD_DATA && IS_SUM_NODE_SEG(sum_blk->footer)) {
3310 ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
3311 "type(SSA) [%d]", i, curseg->segno,
3312 sum_blk->footer.entry_type);
3313 if (c.fix_on || c.preen_mode)
3314 sum_blk->footer.entry_type =
3315 i <= CURSEG_COLD_DATA ?
3316 SUM_TYPE_DATA : SUM_TYPE_NODE;
/*
 * Final verification pass: compare every fsck-computed counter and
 * bitmap against the on-disk checkpoint/SIT state, print an [Ok..] /
 * [Fail] line for each check, optionally reconnect unreachable inodes
 * to lost+found, optionally dump lost files, and finally rewrite the
 * global metadata (NAT, SIT, cursegs, checkpoint, superblock) when
 * fixing is enabled.  Returns an exit code (EXIT_ERR_CODE on failure).
 */
3324 int fsck_verify(struct f2fs_sb_info *sbi)
3329 u32 nr_unref_nid = 0;
3330 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3331 struct hard_link_node *node = NULL;
3332 bool verify_failed = false;
3333 uint64_t max_blks, data_secs, node_secs, free_blks;
3335 if (c.show_file_map)
/* zoned devices: report write-pointer consistency gathered earlier */
3340 if (c.zoned_model == F2FS_ZONED_HM) {
3341 printf("[FSCK] Write pointers consistency ");
3342 if (fsck->chk.wp_inconsistent_zones == 0x0) {
3343 printf(" [Ok..]\n");
3345 printf(" [Fail] [0x%x]\n",
3346 fsck->chk.wp_inconsistent_zones);
3347 verify_failed = true;
3350 if (fsck->chk.wp_fixed && c.fix_on)
/* reconnect unreachable inodes when the image has lost+found support */
3354 if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
3355 for (i = 0; i < fsck->nr_nat_entries; i++)
3356 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
3358 if (i < fsck->nr_nat_entries) {
3359 i = fsck_reconnect_file(sbi);
3360 printf("[FSCK] Reconnect %u files to lost+found\n", i);
/* list any nids that remain unreachable after reconnection */
3364 for (i = 0; i < fsck->nr_nat_entries; i++) {
3365 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0) {
3366 struct node_info ni;
3368 get_node_info(sbi, i, &ni);
3369 printf("NID[0x%x] is unreachable, blkaddr:0x%x\n",
/* report inodes whose on-disk link count exceeds reachable links */
3375 if (fsck->hard_link_list_head != NULL) {
3376 node = fsck->hard_link_list_head;
3378 printf("NID[0x%x] has [0x%x] more unreachable links\n",
3379 node->nid, node->links);
/*
 * NOTE(review): data_secs is computed from the node count and
 * node_secs from (total blocks - node blocks) — the names look
 * swapped; confirm against upstream intent before relying on the
 * Max-image-size / Free-space figures.
 */
3385 data_secs = round_up(sbi->total_valid_node_count, BLKS_PER_SEC(sbi));
3386 node_secs = round_up(sbi->total_valid_block_count -
3387 sbi->total_valid_node_count, BLKS_PER_SEC(sbi));
3388 free_blks = (sbi->total_sections - data_secs - node_secs) *
3390 max_blks = SM_I(sbi)->main_blkaddr + (data_secs + node_secs) *
/* >> 8 converts a 4KB-block count to MB */
3392 printf("[FSCK] Max image size: %"PRIu64" MB, Free space: %"PRIu64" MB\n",
3393 max_blks >> 8, free_blks >> 8);
3394 printf("[FSCK] Unreachable nat entries ");
3395 if (nr_unref_nid == 0x0) {
3396 printf(" [Ok..] [0x%x]\n", nr_unref_nid);
3398 printf(" [Fail] [0x%x]\n", nr_unref_nid);
3399 verify_failed = true;
/* SIT bitmap must agree with the bitmap rebuilt from the fs tree */
3402 printf("[FSCK] SIT valid block bitmap checking ");
3403 if (memcmp(fsck->sit_area_bitmap, fsck->main_area_bitmap,
3404 fsck->sit_area_bitmap_sz) == 0x0) {
3408 verify_failed = true;
3411 printf("[FSCK] Hard link checking for regular file ");
3412 if (fsck->hard_link_list_head == NULL) {
3413 printf(" [Ok..] [0x%x]\n", fsck->chk.multi_hard_link_files);
3415 printf(" [Fail] [0x%x]\n", fsck->chk.multi_hard_link_files);
3416 verify_failed = true;
/* the next five checks compare fsck counters against the checkpoint */
3419 printf("[FSCK] valid_block_count matching with CP ");
3420 if (sbi->total_valid_block_count == fsck->chk.valid_blk_cnt) {
3421 printf(" [Ok..] [0x%x]\n", (u32)fsck->chk.valid_blk_cnt);
3423 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_block_count,
3424 (u32)fsck->chk.valid_blk_cnt);
3425 verify_failed = true;
3428 printf("[FSCK] valid_node_count matching with CP (de lookup) ");
3429 if (sbi->total_valid_node_count == fsck->chk.valid_node_cnt) {
3430 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_node_cnt);
3432 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_node_count,
3433 fsck->chk.valid_node_cnt);
3434 verify_failed = true;
3437 printf("[FSCK] valid_node_count matching with CP (nat lookup)");
3438 if (sbi->total_valid_node_count == fsck->chk.valid_nat_entry_cnt) {
3439 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_nat_entry_cnt);
3441 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_node_count,
3442 fsck->chk.valid_nat_entry_cnt);
3443 verify_failed = true;
3446 printf("[FSCK] valid_inode_count matched with CP ");
3447 if (sbi->total_valid_inode_count == fsck->chk.valid_inode_cnt) {
3448 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_inode_cnt);
3450 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_inode_count,
3451 fsck->chk.valid_inode_cnt);
3452 verify_failed = true;
3455 printf("[FSCK] free segment_count matched with CP ");
3456 if (le32_to_cpu(F2FS_CKPT(sbi)->free_segment_count) ==
3457 fsck->chk.sit_free_segs) {
3458 printf(" [Ok..] [0x%x]\n", fsck->chk.sit_free_segs);
3460 printf(" [Fail] [0x%x, 0x%x]\n",
3461 le32_to_cpu(F2FS_CKPT(sbi)->free_segment_count),
3462 fsck->chk.sit_free_segs);
3463 verify_failed = true;
3466 printf("[FSCK] next block offset is free ");
3467 if (check_curseg_offsets(sbi) == 0) {
3468 printf(" [Ok..]\n");
3470 printf(" [Fail]\n");
3471 verify_failed = true;
3474 printf("[FSCK] fixing SIT types\n");
3475 if (check_sit_types(sbi) != 0)
/* c.bug_on accumulates every other corruption seen during the scan */
3478 printf("[FSCK] other corrupted bugs ");
3479 if (c.bug_on == 0) {
3480 printf(" [Ok..]\n");
3482 printf(" [Fail]\n");
3483 ret = EXIT_ERR_CODE;
3486 if (verify_failed) {
3487 ret = EXIT_ERR_CODE;
3491 #ifndef WITH_ANDROID
/* interactive dump of unreachable files on writable non-Android runs */
3492 if (nr_unref_nid && !c.ro) {
3493 char ans[255] = {0};
3496 printf("\nDo you want to restore lost files into ./lost_found/? [Y/N] ");
3497 res = scanf("%s", ans);
3499 if (!strcasecmp(ans, "y")) {
3500 for (i = 0; i < fsck->nr_nat_entries; i++) {
3501 if (f2fs_test_bit(i, fsck->nat_area_bitmap))
3502 dump_node(sbi, i, 1);
3508 /* fix global metadata */
3509 if (force || (c.fix_on && f2fs_dev_is_writable())) {
3510 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
3511 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3513 if (force || c.bug_on || c.bug_nat_bits || c.quota_fixed) {
3514 /* flush nats to write_nit_bits below */
3515 flush_journal_entries(sbi);
3516 fix_hard_links(sbi);
3517 fix_nat_entries(sbi);
3518 rewrite_sit_area_bitmap(sbi);
3519 fix_wp_sit_alignment(sbi);
3520 fix_curseg_info(sbi);
3522 fix_checkpoints(sbi);
3523 } else if (is_set_ckpt_flags(cp, CP_FSCK_FLAG) ||
3524 is_set_ckpt_flags(cp, CP_QUOTA_NEED_FSCK_FLAG)) {
3525 write_checkpoints(sbi);
/* clear recorded stop reasons / error flags in the superblock */
3528 if (c.abnormal_stop)
3529 memset(sb->s_stop_reason, 0, MAX_STOP_REASON);
3532 memset(sb->s_errors, 0, MAX_F2FS_ERRORS);
3534 if (c.abnormal_stop || c.fs_errors)
3535 update_superblock(sb, SB_MASK_ALL);
3537 /* to return FSCK_ERROR_CORRECTED */
3543 void fsck_free(struct f2fs_sb_info *sbi)
3545 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3548 quota_release_context(&fsck->qctx);
3550 if (fsck->main_area_bitmap)
3551 free(fsck->main_area_bitmap);
3553 if (fsck->nat_area_bitmap)
3554 free(fsck->nat_area_bitmap);
3556 if (fsck->sit_area_bitmap)
3557 free(fsck->sit_area_bitmap);
3560 free(fsck->entries);
3565 while (fsck->dentry) {
3566 struct f2fs_dentry *dentry = fsck->dentry;
3568 fsck->dentry = fsck->dentry->next;