4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
/* Initial capacity of the dentry-tree drawing buffer; print_dentry()
 * doubles it (via realloc of tree_mark) whenever the directory depth
 * exceeds the current size. */
17 uint32_t tree_mark_size = 256;
/*
 * Mark @blk as in-use in fsck's shadow bitmap of the main area, after a
 * soft cross-check that the on-disk segment type agrees with @type.
 * Returns the result of f2fs_set_bit() (presumably the previous bit
 * value — confirm against the bitops helper).
 * NOTE(review): this extract is incomplete; the bare number starting each
 * line is the original source line number, and intermediate lines are
 * missing.
 */
19 int f2fs_set_main_bitmap(struct f2fs_sb_info *sbi, u32 blk, int type)
21 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
25 se = get_seg_entry(sbi, GET_SEGNO(sbi, blk));
/* Type mismatches are only logged at debug level, never fatal here. */
26 if (se->type >= NO_CHECK_TYPE)
28 else if (IS_DATASEG(se->type) != IS_DATASEG(type))
31 /* just check data and node types */
33 DBG(1, "Wrong segment type [0x%x] %x -> %x",
34 GET_SEGNO(sbi, blk), se->type, type);
37 return f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->main_area_bitmap);
/* Test whether @blk is already marked in fsck's main-area shadow bitmap. */
40 static inline int f2fs_test_main_bitmap(struct f2fs_sb_info *sbi, u32 blk)
42 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
44 return f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, blk),
45 fsck->main_area_bitmap);
/* Clear @blk's bit in fsck's main-area shadow bitmap. */
48 static inline int f2fs_clear_main_bitmap(struct f2fs_sb_info *sbi, u32 blk)
50 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
52 return f2fs_clear_bit(BLKOFF_FROM_MAIN(sbi, blk),
53 fsck->main_area_bitmap);
/* Test @blk against the validity bitmap reconstructed from the SIT. */
56 static inline int f2fs_test_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
58 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
60 return f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->sit_area_bitmap);
/* Set @blk in the SIT-derived validity bitmap. */
63 int f2fs_set_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
65 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
67 return f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->sit_area_bitmap);
/*
 * Record an inode @nid with more than one on-disk link so fsck can later
 * count how many directory entries actually reference it.  @link_cnt is
 * the i_links value read from the inode; actual_links starts at 1 for
 * this first sighting.  The list is kept sorted (descending by nid, per
 * the "nid < tmp->nid" walk below).
 * NOTE(review): extract is incomplete — allocation-failure handling and
 * list-splice lines fall in the gaps.
 */
70 static int add_into_hard_link_list(struct f2fs_sb_info *sbi,
71 u32 nid, u32 link_cnt)
73 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
74 struct hard_link_node *node = NULL, *tmp = NULL, *prev = NULL;
76 node = calloc(sizeof(struct hard_link_node), 1);
80 node->links = link_cnt;
81 node->actual_links = 1;
/* Empty list: new node becomes the head. */
84 if (fsck->hard_link_list_head == NULL) {
85 fsck->hard_link_list_head = node;
89 tmp = fsck->hard_link_list_head;
91 /* Find insertion position */
92 while (tmp && (nid < tmp->nid)) {
/* Duplicate insertion for the same nid is a logic error. */
93 ASSERT(tmp->nid != nid);
98 if (tmp == fsck->hard_link_list_head) {
100 fsck->hard_link_list_head = node;
107 DBG(2, "ino[0x%x] has hard links [0x%x]\n", nid, link_cnt);
/*
 * Account one more observed directory entry for hard-linked inode @nid:
 * decrement the expected remaining link count and bump actual_links.
 * When links drops to 1 the bookkeeping node is unlinked (and presumably
 * freed in a missing line).  Returns nonzero when @nid is not in the
 * list (error paths are in the gaps of this extract).
 */
111 static int find_and_dec_hard_link_list(struct f2fs_sb_info *sbi, u32 nid)
113 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
114 struct hard_link_node *node = NULL, *prev = NULL;
116 if (fsck->hard_link_list_head == NULL)
119 node = fsck->hard_link_list_head;
/* List is sorted descending by nid; stop once we pass the slot. */
121 while (node && (nid < node->nid)) {
126 if (node == NULL || (nid != node->nid))
129 /* Decrease link count */
130 node->links = node->links - 1;
131 node->actual_links++;
133 /* if link count becomes one, remove the node */
134 if (node->links == 1) {
135 if (fsck->hard_link_list_head == node)
136 fsck->hard_link_list_head = node->next;
138 prev->next = node->next;
/*
 * Verify that the SSA summary entry covering node block @blk_addr names
 * @nid as its owner.  Mismatches are reported, and repaired in place
 * (entry_type / nid rewritten and the summary block written back) when
 * fix_on is set and the segment is not a current (active) segment.
 * RO-feature images are skipped entirely.
 * NOTE(review): extract is incomplete — return-value plumbing, the
 * need_fix assignments and the sum_blk free path fall in the gaps.
 */
144 static int is_valid_ssa_node_blk(struct f2fs_sb_info *sbi, u32 nid,
147 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
148 struct f2fs_summary_block *sum_blk;
149 struct f2fs_summary *sum_entry;
150 struct seg_entry * se;
152 int need_fix = 0, ret = 0;
/* RO images carry no repairable SSA state worth checking here. */
155 if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
158 segno = GET_SEGNO(sbi, blk_addr);
159 offset = OFFSET_IN_SEG(sbi, blk_addr);
161 sum_blk = get_sum_block(sbi, segno, &type);
163 if (type != SEG_TYPE_NODE && type != SEG_TYPE_CUR_NODE) {
164 /* can't fix current summary, then drop the block */
165 if (!c.fix_on || type < 0) {
166 ASSERT_MSG("Summary footer is not for node segment");
/* SIT says this is a node segment, so trust it and fix the footer. */
172 se = get_seg_entry(sbi, segno);
173 if(IS_NODESEG(se->type)) {
174 FIX_MSG("Summary footer indicates a node segment: 0x%x", segno);
175 sum_blk->footer.entry_type = SUM_TYPE_NODE;
182 sum_entry = &(sum_blk->entries[offset]);
184 if (le32_to_cpu(sum_entry->nid) != nid) {
185 if (!c.fix_on || type < 0) {
186 DBG(0, "nid [0x%x]\n", nid);
187 DBG(0, "target blk_addr [0x%x]\n", blk_addr);
188 DBG(0, "summary blk_addr [0x%x]\n",
190 GET_SEGNO(sbi, blk_addr)));
191 DBG(0, "seg no / offset [0x%x / 0x%x]\n",
192 GET_SEGNO(sbi, blk_addr),
193 OFFSET_IN_SEG(sbi, blk_addr));
194 DBG(0, "summary_entry.nid [0x%x]\n",
195 le32_to_cpu(sum_entry->nid));
196 DBG(0, "--> node block's nid [0x%x]\n", nid);
197 ASSERT_MSG("Invalid node seg summary\n");
200 FIX_MSG("Set node summary 0x%x -> [0x%x] [0x%x]",
201 segno, nid, blk_addr);
202 sum_entry->nid = cpu_to_le32(nid);
206 if (need_fix && f2fs_dev_is_writable()) {
/* Persist the repaired summary block back to its SSA location. */
210 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
211 ret2 = dev_write_block(sum_blk, ssa_blk);
/* get_sum_block() allocated for these types; presumably freed here. */
215 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
216 type == SEG_TYPE_MAX)
/*
 * Check whether a data-segment summary entry @sum is plausible: read the
 * parent node it names and confirm that slot ofs_in_node really points
 * at @blk_addr.  Returns nonzero when consistent (per the caller at
 * is_valid_ssa_data_blk: "else if (is_valid_summary(...))" keeps the
 * entry) — confirm exact return convention against the missing lines.
 */
221 static int is_valid_summary(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
224 u16 ofs_in_node = le16_to_cpu(sum->ofs_in_node);
225 u32 nid = le32_to_cpu(sum->nid);
226 struct f2fs_node *node_blk = NULL;
227 __le32 target_blk_addr;
231 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
232 ASSERT(node_blk != NULL);
234 if (!IS_VALID_NID(sbi, nid))
237 get_node_info(sbi, nid, &ni);
239 if (!IS_VALID_BLK_ADDR(sbi, ni.blk_addr))
242 /* read node_block */
243 ret = dev_read_block(node_blk, ni.blk_addr);
246 if (le32_to_cpu(node_blk->footer.nid) != nid)
249 /* check its block address */
250 if (node_blk->footer.nid == node_blk->footer.ino) {
/* Inode block: data pointers start after the extra_isize area. */
251 int ofs = get_extra_isize(node_blk);
253 if (ofs + ofs_in_node >= DEF_ADDRS_PER_INODE)
255 target_blk_addr = node_blk->i.i_addr[ofs + ofs_in_node];
257 if (ofs_in_node >= DEF_ADDRS_PER_BLOCK)
259 target_blk_addr = node_blk->dn.addr[ofs_in_node];
262 if (blk_addr == le32_to_cpu(target_blk_addr))
/*
 * Data-block counterpart of is_valid_ssa_node_blk(): verify the SSA
 * summary entry for data block @blk_addr records (@parent_nid,
 * @idx_in_node, @version).  With fix_on set, either keeps an entry whose
 * old parent still maps this block (is_valid_summary) or rewrites the
 * entry and flushes the summary block.  Skipped on RO-feature images.
 * NOTE(review): extract incomplete — need_fix assignments, gotos and
 * free paths are in the gaps.
 */
269 static int is_valid_ssa_data_blk(struct f2fs_sb_info *sbi, u32 blk_addr,
270 u32 parent_nid, u16 idx_in_node, u8 version)
272 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
273 struct f2fs_summary_block *sum_blk;
274 struct f2fs_summary *sum_entry;
275 struct seg_entry * se;
277 int need_fix = 0, ret = 0;
280 if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
283 segno = GET_SEGNO(sbi, blk_addr);
284 offset = OFFSET_IN_SEG(sbi, blk_addr);
286 sum_blk = get_sum_block(sbi, segno, &type);
288 if (type != SEG_TYPE_DATA && type != SEG_TYPE_CUR_DATA) {
289 /* can't fix current summary, then drop the block */
290 if (!c.fix_on || type < 0) {
291 ASSERT_MSG("Summary footer is not for data segment");
/* SIT says data segment: repair the summary footer type. */
297 se = get_seg_entry(sbi, segno);
298 if (IS_DATASEG(se->type)) {
299 FIX_MSG("Summary footer indicates a data segment: 0x%x", segno);
300 sum_blk->footer.entry_type = SUM_TYPE_DATA;
307 sum_entry = &(sum_blk->entries[offset]);
/* Any of nid/version/offset disagreeing marks the entry stale. */
309 if (le32_to_cpu(sum_entry->nid) != parent_nid ||
310 sum_entry->version != version ||
311 le16_to_cpu(sum_entry->ofs_in_node) != idx_in_node) {
312 if (!c.fix_on || type < 0) {
313 DBG(0, "summary_entry.nid [0x%x]\n",
314 le32_to_cpu(sum_entry->nid));
315 DBG(0, "summary_entry.version [0x%x]\n",
317 DBG(0, "summary_entry.ofs_in_node [0x%x]\n",
318 le16_to_cpu(sum_entry->ofs_in_node));
319 DBG(0, "parent nid [0x%x]\n",
321 DBG(0, "version from nat [0x%x]\n", version);
322 DBG(0, "idx in parent node [0x%x]\n",
325 DBG(0, "Target data block addr [0x%x]\n", blk_addr);
326 ASSERT_MSG("Invalid data seg summary\n");
328 } else if (is_valid_summary(sbi, sum_entry, blk_addr)) {
329 /* delete wrong index */
332 FIX_MSG("Set data summary 0x%x -> [0x%x] [0x%x] [0x%x]",
333 segno, parent_nid, version, idx_in_node);
334 sum_entry->nid = cpu_to_le32(parent_nid);
335 sum_entry->version = version;
336 sum_entry->ofs_in_node = cpu_to_le16(idx_in_node);
340 if (need_fix && f2fs_dev_is_writable()) {
344 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
345 ret2 = dev_write_block(sum_blk, ssa_blk);
349 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
350 type == SEG_TYPE_MAX)
/*
 * Validate that the directory-entry file type @ftype is consistent with
 * the inode's @mode bits: mode must be one of the seven POSIX file
 * types, and each S_IS* class must pair with its matching F2FS_FT_*
 * value.  Mismatches are reported via ASSERT_MSG; return values are in
 * the missing lines of this extract.
 */
355 static int __check_inode_mode(u32 nid, enum FILE_TYPE ftype, u16 mode)
357 if (ftype >= F2FS_FT_MAX)
359 /* f2fs_iget will return -EIO if mode is not valid file type */
360 if (!S_ISLNK(mode) && !S_ISREG(mode) && !S_ISDIR(mode) &&
361 !S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode) &&
363 ASSERT_MSG("inode [0x%x] unknown file type i_mode [0x%x]",
/* Pairwise mode-class vs dirent-type agreement checks. */
368 if (S_ISLNK(mode) && ftype != F2FS_FT_SYMLINK)
370 if (S_ISREG(mode) && ftype != F2FS_FT_REG_FILE)
372 if (S_ISDIR(mode) && ftype != F2FS_FT_DIR)
374 if (S_ISCHR(mode) && ftype != F2FS_FT_CHRDEV)
376 if (S_ISBLK(mode) && ftype != F2FS_FT_BLKDEV)
378 if (S_ISFIFO(mode) && ftype != F2FS_FT_FIFO)
380 if (S_ISSOCK(mode) && ftype != F2FS_FT_SOCK)
384 ASSERT_MSG("inode [0x%x] mismatch i_mode [0x%x vs. 0x%x]",
/*
 * Core per-node validation.  Given @nid and its expected node type
 * @ntype, look up the NAT entry (@ni), read the node block into
 * @node_blk, and check: nid range, NEW_ADDR, block-address validity,
 * footer nid/ino consistency against both @nid and the NAT, xattr node
 * offset, inode mode vs @ftype, duplicate-block detection via the main
 * bitmap, SSA summary agreement, and SIT validity.  Also maintains the
 * nat_area_bitmap / nid_bitmap bookkeeping and fsck's valid counters,
 * and prints periodic progress.  Nonzero return on failure (error paths
 * fall in this extract's gaps).
 */
389 static int sanity_check_nid(struct f2fs_sb_info *sbi, u32 nid,
390 struct f2fs_node *node_blk,
391 enum FILE_TYPE ftype, enum NODE_TYPE ntype,
392 struct node_info *ni)
394 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
397 if (!IS_VALID_NID(sbi, nid)) {
398 ASSERT_MSG("nid is not valid. [0x%x]", nid);
402 get_node_info(sbi, nid, ni);
404 ASSERT_MSG("nid[0x%x] ino is 0", nid);
/* NEW_ADDR means allocated-but-unwritten: nothing valid to read. */
408 if (ni->blk_addr == NEW_ADDR) {
409 ASSERT_MSG("nid is NEW_ADDR. [0x%x]", nid);
413 if (!IS_VALID_BLK_ADDR(sbi, ni->blk_addr)) {
414 ASSERT_MSG("blkaddress is not valid. [0x%x]", ni->blk_addr);
418 ret = dev_read_block(node_blk, ni->blk_addr);
/* Inode blocks must have footer.nid == footer.ino; non-inodes must not. */
421 if (ntype == TYPE_INODE &&
422 node_blk->footer.nid != node_blk->footer.ino) {
423 ASSERT_MSG("nid[0x%x] footer.nid[0x%x] footer.ino[0x%x]",
424 nid, le32_to_cpu(node_blk->footer.nid),
425 le32_to_cpu(node_blk->footer.ino));
428 if (ni->ino != le32_to_cpu(node_blk->footer.ino)) {
429 ASSERT_MSG("nid[0x%x] nat_entry->ino[0x%x] footer.ino[0x%x]",
430 nid, ni->ino, le32_to_cpu(node_blk->footer.ino));
433 if (ntype != TYPE_INODE &&
434 node_blk->footer.nid == node_blk->footer.ino) {
435 ASSERT_MSG("nid[0x%x] footer.nid[0x%x] footer.ino[0x%x]",
436 nid, le32_to_cpu(node_blk->footer.nid),
437 le32_to_cpu(node_blk->footer.ino));
441 if (le32_to_cpu(node_blk->footer.nid) != nid) {
442 ASSERT_MSG("nid[0x%x] blk_addr[0x%x] footer.nid[0x%x]",
444 le32_to_cpu(node_blk->footer.nid));
448 if (ntype == TYPE_XATTR) {
449 u32 flag = le32_to_cpu(node_blk->footer.flag);
/* Xattr nodes carry a fixed XATTR_NODE_OFFSET in footer.flag. */
451 if ((flag >> OFFSET_BIT_SHIFT) != XATTR_NODE_OFFSET) {
452 ASSERT_MSG("xnid[0x%x] has wrong ofs:[0x%x]",
458 if ((ntype == TYPE_INODE && ftype == F2FS_FT_DIR) ||
459 (ntype == TYPE_XATTR && ftype == F2FS_FT_XATTR)) {
460 /* not included '.' & '..' */
461 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) != 0) {
462 ASSERT_MSG("Duplicated node blk. nid[0x%x][0x%x]\n",
468 /* this if only from fix_hard_links */
469 if (ftype == F2FS_FT_MAX)
472 if (ntype == TYPE_INODE &&
473 __check_inode_mode(nid, ftype, le16_to_cpu(node_blk->i.i_mode)))
476 /* workaround to fix later */
477 if (ftype != F2FS_FT_ORPHAN ||
478 f2fs_test_bit(nid, fsck->nat_area_bitmap) != 0) {
479 f2fs_clear_bit(nid, fsck->nat_area_bitmap);
480 /* avoid reusing nid when reconnecting files */
481 f2fs_set_bit(nid, NM_I(sbi)->nid_bitmap);
483 ASSERT_MSG("orphan or xattr nid is duplicated [0x%x]\n",
486 if (is_valid_ssa_node_blk(sbi, nid, ni->blk_addr)) {
487 ASSERT_MSG("summary node block is not valid. [0x%x]", nid);
491 if (f2fs_test_sit_bitmap(sbi, ni->blk_addr) == 0)
492 ASSERT_MSG("SIT bitmap is 0x0. blk_addr[0x%x]",
/* First visit of this block: count it once toward fsck's totals. */
495 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0) {
497 fsck->chk.valid_blk_cnt++;
498 fsck->chk.valid_node_cnt++;
500 /* Progress report */
501 if (!c.show_file_map && sbi->total_valid_node_count > 1000) {
502 unsigned int p10 = sbi->total_valid_node_count / 10;
/* Print only at each ~10% boundary of checked nodes. */
504 if (sbi->fsck->chk.checked_node_cnt++ % p10)
507 printf("[FSCK] Check node %"PRIu64" / %u (%.2f%%)\n",
508 sbi->fsck->chk.checked_node_cnt,
509 sbi->total_valid_node_count,
510 10 * (float)sbi->fsck->chk.checked_node_cnt /
/* Public wrapper exposing the static sanity_check_nid() to other files. */
517 int fsck_sanity_check_nid(struct f2fs_sb_info *sbi, u32 nid,
518 struct f2fs_node *node_blk,
519 enum FILE_TYPE ftype, enum NODE_TYPE ntype,
520 struct node_info *ni)
522 return sanity_check_nid(sbi, nid, node_blk, ftype, ntype, ni);
/*
 * Validate inode @ino's external xattr node @x_nid: run the nid sanity
 * checks as TYPE_XATTR, then account the block (cold node) and bump
 * *blk_cnt.  Error returns and the node_blk free path are in this
 * extract's gaps.
 */
525 static int fsck_chk_xattr_blk(struct f2fs_sb_info *sbi, u32 ino,
526 u32 x_nid, u32 *blk_cnt)
528 struct f2fs_node *node_blk = NULL;
535 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
536 ASSERT(node_blk != NULL);
539 if (sanity_check_nid(sbi, x_nid, node_blk,
540 F2FS_FT_XATTR, TYPE_XATTR, &ni)) {
545 *blk_cnt = *blk_cnt + 1;
546 f2fs_set_main_bitmap(sbi, ni.blk_addr, CURSEG_COLD_NODE);
547 DBG(2, "ino[0x%x] x_nid[0x%x]\n", ino, x_nid);
/*
 * Check one node block @nid and dispatch by node type: inodes go to
 * fsck_chk_inode_blk() (plus quota accounting), direct/indirect/double-
 * indirect nodes mark the main bitmap and recurse via the matching
 * fsck_chk_*node_blk() helper.  The switch scaffolding, return values
 * and cleanup are partly in this extract's gaps.
 */
553 int fsck_chk_node_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
554 u32 nid, enum FILE_TYPE ftype, enum NODE_TYPE ntype,
555 u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
556 struct child_info *child)
559 struct f2fs_node *node_blk = NULL;
561 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
562 ASSERT(node_blk != NULL);
564 if (sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni))
567 if (ntype == TYPE_INODE) {
568 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
570 fsck_chk_inode_blk(sbi, nid, ftype, node_blk, blk_cnt, cbc,
572 quota_add_inode_usage(fsck->qctx, nid, &node_blk->i);
575 case TYPE_DIRECT_NODE:
576 f2fs_set_main_bitmap(sbi, ni.blk_addr,
578 fsck_chk_dnode_blk(sbi, inode, nid, ftype, node_blk,
579 blk_cnt, cbc, child, &ni);
581 case TYPE_INDIRECT_NODE:
582 f2fs_set_main_bitmap(sbi, ni.blk_addr,
584 fsck_chk_idnode_blk(sbi, inode, ftype, node_blk,
585 blk_cnt, cbc, child);
587 case TYPE_DOUBLE_INDIRECT_NODE:
588 f2fs_set_main_bitmap(sbi, ni.blk_addr,
590 fsck_chk_didnode_blk(sbi, inode, ftype, node_blk,
591 blk_cnt, cbc, child);
/* Copy the on-disk (little-endian) extent @i_ext into the in-memory,
 * CPU-endian extent_info @ext. */
604 static inline void get_extent_info(struct extent_info *ext,
605 struct f2fs_extent *i_ext)
607 ext->fofs = le32_to_cpu(i_ext->fofs);
608 ext->blk = le32_to_cpu(i_ext->blk_addr);
609 ext->len = le32_to_cpu(i_ext->len);
/*
 * Incrementally validate the inode's cached extent against each observed
 * data block: called once per page offset (child->pgofs) with its
 * @blkaddr, and once more with @last set after the final block.  Any
 * hole or address that contradicts the extent's [fofs, fofs+len) ->
 * [blk, blk+len) mapping sets FSCK_UNMATCHED_EXTENT in child->state.
 * NOTE(review): several branch bodies fall in this extract's gaps.
 */
612 static void check_extent_info(struct child_info *child,
613 block_t blkaddr, int last)
615 struct extent_info *ei = &child->ei;
616 u32 pgofs = child->pgofs;
/* Already proven wrong once — no need to keep checking. */
622 if (child->state & FSCK_UNMATCHED_EXTENT)
/* Inline inodes have no mapped blocks, so any extent length is bogus. */
625 if ((child->state & FSCK_INLINE_INODE) && ei->len)
629 /* hole exist in the back of extent */
630 if (child->last_blk != ei->blk + ei->len - 1)
631 child->state |= FSCK_UNMATCHED_EXTENT;
635 if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
/* pgofs falls inside the extent's file range: address must match. */
638 if (pgofs >= ei->fofs && pgofs < ei->fofs + ei->len) {
639 /* unmatched blkaddr */
640 if (is_hole || (blkaddr != pgofs - ei->fofs + ei->blk))
643 if (!child->last_blk) {
644 /* hole exists in the front of extent */
645 if (pgofs != ei->fofs)
647 } else if (child->last_blk + 1 != blkaddr) {
648 /* hole exists in the middle of extent */
651 child->last_blk = blkaddr;
/* Outside the file range, the physical range must not overlap either. */
658 if (blkaddr < ei->blk || blkaddr >= ei->blk + ei->len)
660 /* unmatched file offset */
662 child->state |= FSCK_UNMATCHED_EXTENT;
/* Best-effort readahead of @nid's node block; silently skips nid 0,
 * invalid nids, and invalid block addresses. */
665 void fsck_reada_node_block(struct f2fs_sb_info *sbi, u32 nid)
669 if (nid != 0 && IS_VALID_NID(sbi, nid)) {
670 get_node_info(sbi, nid, &ni);
671 if (IS_VALID_BLK_ADDR(sbi, ni.blk_addr))
672 dev_reada_block(ni.blk_addr);
/* Issue readahead for every child nid recorded in an indirect node
 * block, so the subsequent per-node checks hit warm I/O. */
676 void fsck_reada_all_direct_node_blocks(struct f2fs_sb_info *sbi,
677 struct f2fs_node *node_blk)
681 for (i = 0; i < NIDS_PER_BLOCK; i++) {
682 u32 nid = le32_to_cpu(node_blk->in.nid[i]);
684 fsck_reada_node_block(sbi, nid);
688 /* start with valid nid and blkaddr */
/*
 * Full consistency check of one inode block.  Responsibilities visible
 * in this extract: normalize the compression flag; track hard links via
 * the hard_link list; check the xattr node; validate extra_isize,
 * inline-xattr size, casefold flag, inline data and inline dentries;
 * walk every direct data pointer and the five i_nid sub-trees while
 * accumulating *blk_cnt / cbc; then cross-check the cached extent,
 * i_blocks, i_compr_blocks, i_namelen, directory link/dot counts,
 * i_gc_failures, symlink size, orphan links, and the inode checksum.
 * Repairs are applied in memory when fix_on and flushed at the end via
 * dev_write_block() when need_fix is set.
 * NOTE(review): this extract is heavily gapped — braces, else-arms,
 * need_fix assignments, labels (e.g. skip_blkcnt_fix) and frees are
 * missing; the leading number on each line is the original line number.
 */
689 void fsck_chk_inode_blk(struct f2fs_sb_info *sbi, u32 nid,
690 enum FILE_TYPE ftype, struct f2fs_node *node_blk,
691 u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
692 struct node_info *ni, struct child_info *child_d)
694 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
695 struct child_info child;
696 enum NODE_TYPE ntype;
697 u32 i_links = le32_to_cpu(node_blk->i.i_links);
698 u64 i_size = le64_to_cpu(node_blk->i.i_size);
699 u64 i_blocks = le64_to_cpu(node_blk->i.i_blocks);
700 bool compr_supported = c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION);
701 u32 i_flags = le32_to_cpu(node_blk->i.i_flags);
702 bool compressed = i_flags & F2FS_COMPR_FL;
703 bool compr_rel = node_blk->i.i_inline & F2FS_COMPRESS_RELEASED;
704 u64 i_compr_blocks = le64_to_cpu(node_blk->i.i_compr_blocks);
705 nid_t i_xattr_nid = le32_to_cpu(node_blk->i.i_xattr_nid);
709 unsigned int addrs, idx = 0;
710 unsigned short i_gc_failures;
713 u32 cluster_size = 1 << node_blk->i.i_log_cluster_size;
/* Compression flag is invalid without the feature or with inline data. */
718 if (!compr_supported || (node_blk->i.i_inline & F2FS_INLINE_DATA)) {
720 * The 'compression' flag in i_flags affects the traverse of
721 * the node tree. Thus, it must be fixed unconditionally
722 * in the memory (node_blk).
724 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_COMPR_FL);
728 FIX_MSG("[0x%x] i_flags=0x%x -> 0x%x",
729 nid, i_flags, node_blk->i.i_flags);
731 i_flags &= ~F2FS_COMPR_FL;
734 memset(&child, 0, sizeof(child));
737 child.pp_ino = le32_to_cpu(node_blk->i.i_pino);
738 child.dir_level = node_blk->i.i_dir_level;
740 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0)
741 fsck->chk.valid_inode_cnt++;
/* Directories: account as hot node and remember the parent name. */
743 if (ftype == F2FS_FT_DIR) {
744 f2fs_set_main_bitmap(sbi, ni->blk_addr, CURSEG_HOT_NODE);
745 namelen = le32_to_cpu(node_blk->i.i_namelen);
746 if (namelen > F2FS_NAME_LEN)
747 namelen = F2FS_NAME_LEN;
748 memcpy(child.p_name, node_blk->i.i_name, namelen);
/* Hard-link bookkeeping: first sighting registers, later ones decrement. */
750 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0) {
751 f2fs_set_main_bitmap(sbi, ni->blk_addr,
753 if (i_links > 1 && ftype != F2FS_FT_ORPHAN &&
754 !is_qf_ino(F2FS_RAW_SUPER(sbi), nid)) {
755 /* First time. Create new hard link node */
756 add_into_hard_link_list(sbi, nid, i_links);
757 fsck->chk.multi_hard_link_files++;
760 DBG(3, "[0x%x] has hard links [0x%x]\n", nid, i_links);
761 if (find_and_dec_hard_link_list(sbi, nid)) {
762 ASSERT_MSG("[0x%x] needs more i_links=0x%x",
765 node_blk->i.i_links =
766 cpu_to_le32(i_links + 1);
768 FIX_MSG("File: 0x%x "
769 "i_links= 0x%x -> 0x%x",
770 nid, i_links, i_links + 1);
772 goto skip_blkcnt_fix;
774 /* No need to go deep into the node */
779 /* readahead xattr node block */
780 fsck_reada_node_block(sbi, i_xattr_nid);
/* Broken xattr node: detach it from the inode (when fixing). */
782 if (fsck_chk_xattr_blk(sbi, nid, i_xattr_nid, blk_cnt)) {
784 node_blk->i.i_xattr_nid = 0;
786 FIX_MSG("Remove xattr block: 0x%x, x_nid = 0x%x",
/* Special files carry no data tree — skip the block walk. */
791 if (ftype == F2FS_FT_CHRDEV || ftype == F2FS_FT_BLKDEV ||
792 ftype == F2FS_FT_FIFO || ftype == F2FS_FT_SOCK)
795 /* init extent info */
796 get_extent_info(&child.ei, &node_blk->i.i_ext);
/* extra_isize sanity: present only with the EXTRA_ATTR feature. */
799 if (f2fs_has_extra_isize(&node_blk->i)) {
800 if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
802 le16_to_cpu(node_blk->i.i_extra_isize);
803 if (isize > 4 * DEF_ADDRS_PER_INODE) {
804 ASSERT_MSG("[0x%x] wrong i_extra_isize=0x%x",
807 FIX_MSG("ino[0x%x] recover i_extra_isize "
811 node_blk->i.i_extra_isize =
812 cpu_to_le16(calc_extra_isize());
817 ASSERT_MSG("[0x%x] wrong extra_attr flag", nid);
819 FIX_MSG("ino[0x%x] remove F2FS_EXTRA_ATTR "
820 "flag in i_inline:%u",
821 nid, node_blk->i.i_inline);
822 /* we don't support tuning F2FS_FEATURE_EXTRA_ATTR now */
823 node_blk->i.i_inline &= ~F2FS_EXTRA_ATTR;
/* Flexible inline-xattr size must stay within its legal bounds. */
829 cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) &&
830 (node_blk->i.i_inline & F2FS_INLINE_XATTR)) {
831 unsigned int inline_size =
832 le16_to_cpu(node_blk->i.i_inline_xattr_size);
835 inline_size > MAX_INLINE_XATTR_SIZE) {
836 ASSERT_MSG("[0x%x] wrong inline_xattr_size:%u",
839 FIX_MSG("ino[0x%x] recover inline xattr size "
842 DEFAULT_INLINE_XATTR_ADDRS);
843 node_blk->i.i_inline_xattr_size =
844 cpu_to_le16(DEFAULT_INLINE_XATTR_ADDRS);
850 ofs = get_extra_isize(node_blk);
/* Casefold only applies to dirs on filesystems with the feature. */
852 if ((node_blk->i.i_flags & cpu_to_le32(F2FS_CASEFOLD_FL)) &&
853 (ftype != F2FS_FT_DIR ||
854 !(c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)))) {
855 ASSERT_MSG("[0x%x] unexpected casefold flag", nid);
857 FIX_MSG("ino[0x%x] clear casefold flag", nid);
858 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_CASEFOLD_FL);
/* Inline data: slot 0 is reserved and must not hold a block address. */
863 if ((node_blk->i.i_inline & F2FS_INLINE_DATA)) {
864 unsigned int inline_size = MAX_INLINE_DATA(node_blk);
866 qf_szchk_type[cur_qtype] = QF_SZCHK_INLINE;
867 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
870 ASSERT_MSG("[0x%x] wrong inline reserve blkaddr:%u",
873 FIX_MSG("inline_data has wrong 0'th block = %x",
875 node_blk->i.i_addr[ofs] = 0;
876 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
880 if (i_size > inline_size) {
881 ASSERT_MSG("[0x%x] wrong inline size:%lu",
882 nid, (unsigned long)i_size);
884 node_blk->i.i_size = cpu_to_le64(inline_size);
885 FIX_MSG("inline_data has wrong i_size %lu",
886 (unsigned long)i_size);
/* DATA_EXIST cleared but payload nonzero => set the flag back. */
890 if (!(node_blk->i.i_inline & F2FS_DATA_EXIST)) {
891 char buf[MAX_INLINE_DATA(node_blk)];
892 memset(buf, 0, MAX_INLINE_DATA(node_blk));
894 if (memcmp(buf, inline_data_addr(node_blk),
895 MAX_INLINE_DATA(node_blk))) {
896 ASSERT_MSG("[0x%x] junk inline data", nid);
898 FIX_MSG("inline_data has DATA_EXIST");
899 node_blk->i.i_inline |= F2FS_DATA_EXIST;
904 DBG(3, "ino[0x%x] has inline data!\n", nid);
905 child.state |= FSCK_INLINE_INODE;
/* Inline dentries: same reserved-slot rule, then check the entries. */
909 if ((node_blk->i.i_inline & F2FS_INLINE_DENTRY)) {
910 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
912 DBG(3, "ino[0x%x] has inline dentry!\n", nid);
914 ASSERT_MSG("[0x%x] wrong inline reserve blkaddr:%u",
917 FIX_MSG("inline_dentry has wrong 0'th block = %x",
919 node_blk->i.i_addr[ofs] = 0;
920 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
925 ret = fsck_chk_inline_dentries(sbi, node_blk, &child);
930 child.state |= FSCK_INLINE_INODE;
934 /* check data blocks in inode */
935 addrs = ADDRS_PER_INODE(&node_blk->i);
/* Quota files: precompute the maximum size the node tree could map. */
936 if (cur_qtype != -1) {
937 u64 addrs_per_blk = (u64)ADDRS_PER_BLOCK(&node_blk->i);
938 qf_szchk_type[cur_qtype] = QF_SZCHK_REGFILE;
939 qf_maxsize[cur_qtype] = (u64)(addrs + 2 * addrs_per_blk +
940 2 * addrs_per_blk * NIDS_PER_BLOCK +
941 addrs_per_blk * NIDS_PER_BLOCK *
942 NIDS_PER_BLOCK) * F2FS_BLKSIZE;
/* Walk every direct data pointer stored in the inode itself. */
944 for (idx = 0; idx < addrs; idx++, child.pgofs++) {
945 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs + idx]);
947 /* check extent info */
948 check_extent_info(&child, blkaddr, 0);
950 if (blkaddr == NULL_ADDR)
/* COMPRESS_ADDR only legal at a cluster boundary of a compressed file. */
952 if (blkaddr == COMPRESS_ADDR) {
953 if (!compressed || (child.pgofs &
954 (cluster_size - 1)) != 0) {
956 node_blk->i.i_addr[ofs + idx] =
959 FIX_MSG("[0x%x] i_addr[%d] = 0", nid,
965 fsck->chk.valid_blk_cnt++;
966 *blk_cnt = *blk_cnt + 1;
967 cbc->cheader_pgofs = child.pgofs;
/* NEW_ADDR inside an unreleased compress cluster is tolerated. */
972 if (!compr_rel && blkaddr == NEW_ADDR &&
973 child.pgofs - cbc->cheader_pgofs < cluster_size)
975 ret = fsck_chk_data_blk(sbi,
976 IS_CASEFOLDED(&node_blk->i),
978 &child, (i_blocks == *blk_cnt),
979 ftype, nid, idx, ni->version,
980 file_is_encrypt(&node_blk->i));
982 *blk_cnt = *blk_cnt + 1;
983 if (cur_qtype != -1 && blkaddr != NEW_ADDR)
984 qf_last_blkofs[cur_qtype] = child.pgofs;
985 } else if (c.fix_on) {
986 node_blk->i.i_addr[ofs + idx] = 0;
988 FIX_MSG("[0x%x] i_addr[%d] = 0", nid, ofs + idx);
992 /* readahead node blocks */
993 for (idx = 0; idx < 5; idx++) {
994 u32 nid = le32_to_cpu(node_blk->i.i_nid[idx]);
995 fsck_reada_node_block(sbi, nid);
998 /* check node blocks in inode */
999 for (idx = 0; idx < 5; idx++) {
1000 nid_t i_nid = le32_to_cpu(node_blk->i.i_nid[idx]);
/* i_nid layout: [0..1] direct, [2..3] indirect, [4] double indirect. */
1002 if (idx == 0 || idx == 1)
1003 ntype = TYPE_DIRECT_NODE;
1004 else if (idx == 2 || idx == 3)
1005 ntype = TYPE_INDIRECT_NODE;
1007 ntype = TYPE_DOUBLE_INDIRECT_NODE;
1014 ret = fsck_chk_node_blk(sbi, &node_blk->i, i_nid,
1015 ftype, ntype, blk_cnt, cbc, &child);
1017 *blk_cnt = *blk_cnt + 1;
1018 } else if (ret == -EINVAL) {
1020 node_blk->i.i_nid[idx] = 0;
1022 FIX_MSG("[0x%x] i_nid[%d] = 0", nid, idx);
/* Even for an absent/broken subtree, advance pgofs past its span. */
1025 if (ntype == TYPE_DIRECT_NODE)
1026 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1027 else if (ntype == TYPE_INDIRECT_NODE)
1028 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1031 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1032 NIDS_PER_BLOCK * NIDS_PER_BLOCK;
1038 /* check uncovered range in the back of extent */
1039 check_extent_info(&child, 0, 1);
1041 if (child.state & FSCK_UNMATCHED_EXTENT) {
1042 ASSERT_MSG("ino: 0x%x has wrong ext: [pgofs:%u, blk:%u, len:%u]",
1043 nid, child.ei.fofs, child.ei.blk, child.ei.len);
/* Reconcile counted blocks with the inode's i_blocks. */
1048 if (i_blocks != *blk_cnt) {
1049 ASSERT_MSG("ino: 0x%x has i_blocks: %08"PRIx64", "
1050 "but has %u blocks",
1051 nid, i_blocks, *blk_cnt);
1053 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1055 FIX_MSG("[0x%x] i_blocks=0x%08"PRIx64" -> 0x%x",
1056 nid, i_blocks, *blk_cnt);
1060 if (compressed && i_compr_blocks != cbc->cnt) {
1062 node_blk->i.i_compr_blocks = cpu_to_le64(cbc->cnt);
1064 FIX_MSG("[0x%x] i_compr_blocks=0x%08"PRIx64" -> 0x%x",
1065 nid, i_compr_blocks, cbc->cnt);
1070 en = malloc(F2FS_PRINT_NAMELEN);
/* Over-long i_namelen: trust the parent dirent's length when available. */
1073 namelen = le32_to_cpu(node_blk->i.i_namelen);
1074 if (namelen > F2FS_NAME_LEN) {
1075 if (child_d && child_d->i_namelen <= F2FS_NAME_LEN) {
1076 ASSERT_MSG("ino: 0x%x has i_namelen: 0x%x, "
1077 "but has %d characters for name",
1078 nid, namelen, child_d->i_namelen);
1080 FIX_MSG("[0x%x] i_namelen=0x%x -> 0x%x", nid, namelen,
1081 child_d->i_namelen);
1082 node_blk->i.i_namelen = cpu_to_le32(child_d->i_namelen);
1085 namelen = child_d->i_namelen;
1087 namelen = F2FS_NAME_LEN;
1089 pretty_print_filename(node_blk->i.i_name, namelen, en,
1090 file_enc_name(&node_blk->i));
1091 if (ftype == F2FS_FT_ORPHAN)
1092 DBG(1, "Orphan Inode: 0x%x [%s] i_blocks: %u\n\n",
1093 le32_to_cpu(node_blk->footer.ino),
1096 if (is_qf_ino(F2FS_RAW_SUPER(sbi), nid))
1097 DBG(1, "Quota Inode: 0x%x [%s] i_blocks: %u\n\n",
1098 le32_to_cpu(node_blk->footer.ino),
/* Directory-specific cross-checks: link count and '.'/'..' presence. */
1101 if (ftype == F2FS_FT_DIR) {
1102 DBG(1, "Directory Inode: 0x%x [%s] depth: %d has %d files\n\n",
1103 le32_to_cpu(node_blk->footer.ino), en,
1104 le32_to_cpu(node_blk->i.i_current_depth),
1107 if (i_links != child.links) {
1108 ASSERT_MSG("ino: 0x%x i_links: %u, real links: %u",
1109 nid, i_links, child.links);
1111 node_blk->i.i_links = cpu_to_le32(child.links);
1113 FIX_MSG("Dir: 0x%x i_links= 0x%x -> 0x%x",
1114 nid, i_links, child.links);
1117 if (child.dots < 2 &&
1118 !(node_blk->i.i_inline & F2FS_INLINE_DOTS)) {
1119 ASSERT_MSG("ino: 0x%x dots: %u",
1122 node_blk->i.i_inline |= F2FS_INLINE_DOTS;
1124 FIX_MSG("Dir: 0x%x set inline_dots", nid);
1129 i_gc_failures = le16_to_cpu(node_blk->i.i_gc_failures);
1132 * old kernel initialized i_gc_failures as 0x01, in preen mode 2,
1133 * let's skip repairing.
1135 if (ftype == F2FS_FT_REG_FILE && i_gc_failures &&
1136 (c.preen_mode != PREEN_MODE_2 || i_gc_failures != 0x01)) {
1138 DBG(1, "Regular Inode: 0x%x [%s] depth: %d\n\n",
1139 le32_to_cpu(node_blk->footer.ino), en,
1143 node_blk->i.i_gc_failures = cpu_to_le16(0);
1145 FIX_MSG("Regular: 0x%x reset i_gc_failures from 0x%x to 0x00",
1146 nid, i_gc_failures);
/* Symlink with zero size but blocks present: restore one-block size. */
1152 if (ftype == F2FS_FT_SYMLINK && i_size == 0 &&
1153 i_blocks == (i_xattr_nid ? 3 : 2)) {
1154 node_blk->i.i_size = cpu_to_le64(F2FS_BLKSIZE);
1156 FIX_MSG("Symlink: recover 0x%x with i_size=%lu",
1157 nid, (unsigned long)F2FS_BLKSIZE);
1160 if (ftype == F2FS_FT_ORPHAN && i_links) {
1161 ASSERT_MSG("ino: 0x%x is orphan inode, but has i_links: %u",
1164 node_blk->i.i_links = 0;
1166 FIX_MSG("ino: 0x%x orphan_inode, i_links= 0x%x -> 0",
1171 /* drop extent information to avoid potential wrong access */
1172 if (need_fix && f2fs_dev_is_writable())
1173 node_blk->i.i_ext.len = 0;
/* Recompute the inode checksum after any in-memory repairs. */
1175 if ((c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) &&
1176 f2fs_has_extra_isize(&node_blk->i)) {
1177 __u32 provided, calculated;
1179 provided = le32_to_cpu(node_blk->i.i_inode_checksum);
1180 calculated = f2fs_inode_chksum(node_blk);
1182 if (provided != calculated) {
1183 ASSERT_MSG("ino: 0x%x chksum:0x%x, but calculated one is: 0x%x",
1184 nid, provided, calculated);
1186 node_blk->i.i_inode_checksum =
1187 cpu_to_le32(calculated);
1189 FIX_MSG("ino: 0x%x recover, i_inode_checksum= 0x%x -> 0x%x",
1190 nid, provided, calculated);
/* Flush all accumulated repairs back to the device. */
1195 if (need_fix && f2fs_dev_is_writable()) {
1196 ret = dev_write_block(node_blk, ni->blk_addr);
/*
 * Check every data pointer in a direct node block, mirroring the inode
 * direct-pointer loop in fsck_chk_inode_blk(): validate COMPRESS_ADDR
 * placement, verify each data block via fsck_chk_data_blk(), zero bad
 * slots when fixing, and write the node back if anything changed.
 * NOTE(review): extract incomplete — need_fix assignments and some
 * branch bodies fall in the gaps.
 */
1201 int fsck_chk_dnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
1202 u32 nid, enum FILE_TYPE ftype, struct f2fs_node *node_blk,
1203 u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
1204 struct child_info *child, struct node_info *ni)
1209 child->pp_ino = le32_to_cpu(inode->i_pino);
1210 u32 i_flags = le32_to_cpu(inode->i_flags);
1211 bool compressed = i_flags & F2FS_COMPR_FL;
1212 bool compr_rel = inode->i_inline & F2FS_COMPRESS_RELEASED;
1213 u32 cluster_size = 1 << inode->i_log_cluster_size;
1215 for (idx = 0; idx < ADDRS_PER_BLOCK(inode); idx++, child->pgofs++) {
1216 block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);
1218 check_extent_info(child, blkaddr, 0);
1220 if (blkaddr == NULL_ADDR)
/* COMPRESS_ADDR must start a cluster of a compressed inode. */
1222 if (blkaddr == COMPRESS_ADDR) {
1223 if (!compressed || (child->pgofs &
1224 (cluster_size - 1)) != 0) {
1226 node_blk->dn.addr[idx] = NULL_ADDR;
1228 FIX_MSG("[0x%x] dn.addr[%d] = 0", nid,
1234 F2FS_FSCK(sbi)->chk.valid_blk_cnt++;
1235 *blk_cnt = *blk_cnt + 1;
1236 cbc->cheader_pgofs = child->pgofs;
/* NEW_ADDR tolerated inside an unreleased compress cluster. */
1241 if (!compr_rel && blkaddr == NEW_ADDR && child->pgofs -
1242 cbc->cheader_pgofs < cluster_size)
1244 ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(inode),
1246 le64_to_cpu(inode->i_blocks) == *blk_cnt, ftype,
1247 nid, idx, ni->version,
1248 file_is_encrypt(inode));
1250 *blk_cnt = *blk_cnt + 1;
1251 if (cur_qtype != -1 && blkaddr != NEW_ADDR)
1252 qf_last_blkofs[cur_qtype] = child->pgofs;
1253 } else if (c.fix_on) {
1254 node_blk->dn.addr[idx] = NULL_ADDR;
1256 FIX_MSG("[0x%x] dn.addr[%d] = 0", nid, idx);
1259 if (need_fix && f2fs_dev_is_writable()) {
1260 ret = dev_write_block(node_blk, ni->blk_addr);
/*
 * Check an indirect node block: readahead all child nids, then recurse
 * into each nonzero entry as a direct node.  -EINVAL from the child
 * zeroes that slot (when fixing); the repaired block is written back at
 * the end.  child->pgofs is advanced per slot even for skipped entries.
 */
1266 int fsck_chk_idnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
1267 enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
1268 struct f2fs_compr_blk_cnt *cbc, struct child_info *child)
1270 int need_fix = 0, ret;
1273 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1275 for (i = 0; i < NIDS_PER_BLOCK; i++) {
1276 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1278 ret = fsck_chk_node_blk(sbi, inode,
1279 le32_to_cpu(node_blk->in.nid[i]),
1280 ftype, TYPE_DIRECT_NODE, blk_cnt,
1283 *blk_cnt = *blk_cnt + 1;
1284 else if (ret == -EINVAL) {
1286 printf("should delete in.nid[i] = 0;\n");
1288 node_blk->in.nid[i] = 0;
1290 FIX_MSG("Set indirect node 0x%x -> 0", i);
1293 child->pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1297 if (need_fix && f2fs_dev_is_writable()) {
1298 struct node_info ni;
1299 nid_t nid = le32_to_cpu(node_blk->footer.nid);
1301 get_node_info(sbi, nid, &ni);
1302 ret = dev_write_block(node_blk, ni.blk_addr);
/*
 * Check a double-indirect node block: identical structure to
 * fsck_chk_idnode_blk() but each child is an indirect node, so pgofs
 * advances by ADDRS_PER_BLOCK * NIDS_PER_BLOCK per slot.
 */
1309 int fsck_chk_didnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
1310 enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
1311 struct f2fs_compr_blk_cnt *cbc, struct child_info *child)
1314 int need_fix = 0, ret = 0;
1316 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1318 for (i = 0; i < NIDS_PER_BLOCK; i++) {
1319 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1321 ret = fsck_chk_node_blk(sbi, inode,
1322 le32_to_cpu(node_blk->in.nid[i]),
1323 ftype, TYPE_INDIRECT_NODE, blk_cnt, cbc, child);
1325 *blk_cnt = *blk_cnt + 1;
1326 else if (ret == -EINVAL) {
1328 printf("should delete in.nid[i] = 0;\n");
1330 node_blk->in.nid[i] = 0;
1332 FIX_MSG("Set double indirect node 0x%x -> 0", i);
1335 child->pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1340 if (need_fix && f2fs_dev_is_writable()) {
1341 struct node_info ni;
1342 nid_t nid = le32_to_cpu(node_blk->footer.nid);
1344 get_node_info(sbi, nid, &ni);
1345 ret = dev_write_block(node_blk, ni.blk_addr);
/* 64-character alphabet for base64_encode(); note ',' replaces the
 * usual '/' so encoded names remain path-safe. */
1352 static const char *lookup_table =
1353 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
1358 * Encodes the input string using characters from the set [A-Za-z0-9+,].
1359 * The encoded string is roughly 4/3 times the size of the input string.
/*
 * Base64-style encoder used to print undecryptable (encrypted) names.
 * Accumulates input bytes little-endian into @ac and emits one output
 * character per 6 bits; returns the encoded length (return statement is
 * in this extract's gaps).
 */
1361 static int base64_encode(const u8 *src, int len, char *dst)
1363 int i, bits = 0, ac = 0;
1366 for (i = 0; i < len; i++) {
1367 ac += src[i] << bits;
1370 *cp++ = lookup_table[ac & 0x3f];
1373 } while (bits >= 6);
/* Flush any remaining (<6) bits as a final character. */
1376 *cp++ = lookup_table[ac & 0x3f];
/*
 * Produce a printable copy of @raw_name in @out: encrypted names are
 * base64-encoded, others copied verbatim; length clamped to
 * F2FS_NAME_LEN (NUL-termination happens in a line missing from this
 * extract — confirm).
 */
1380 void pretty_print_filename(const u8 *raw_name, u32 len,
1381 char out[F2FS_PRINT_NAMELEN], int enc_name)
1383 len = min(len, (u32)F2FS_NAME_LEN);
1386 len = base64_encode(raw_name, len, out);
1388 memcpy(out, raw_name, len);
/*
 * Debug output for one directory entry: either the ASCII tree view
 * (-show_dentry) using the growable tree_mark buffer, or the flat file
 * map (-show_file_map) for regular files.  No-op unless one of those
 * modes is enabled.
 * NOTE(review): extract incomplete — last_de computation and some
 * show_file_map lines are in the gaps.
 */
1392 static void print_dentry(struct f2fs_sb_info *sbi, __u8 *name,
1393 u8 *bitmap, struct f2fs_dir_entry *dentry,
1394 int max, int idx, int last_blk, int enc_name)
1396 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1397 u32 depth = fsck->dentry_depth;
1403 char new[F2FS_PRINT_NAMELEN];
1405 if (!c.show_dentry && !c.show_file_map)
/* Locate the next occupied slot to decide if this entry is the last. */
1408 name_len = le16_to_cpu(dentry[idx].name_len);
1409 next_idx = idx + (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
1411 bit_offset = find_next_bit_le(bitmap, max, next_idx);
1412 if (bit_offset >= max && last_blk)
/* Grow the per-depth tree-drawing buffer on demand (doubling). */
1415 if (tree_mark_size <= depth) {
1416 tree_mark_size *= 2;
1417 ASSERT(tree_mark_size != 0);
1418 tree_mark = realloc(tree_mark, tree_mark_size);
1419 ASSERT(tree_mark != NULL);
/* '`' marks a last child, '|' a continuing branch at this depth. */
1423 tree_mark[depth] = '`';
1425 tree_mark[depth] = '|';
1427 if (tree_mark[depth - 1] == '`')
1428 tree_mark[depth - 1] = ' ';
1430 pretty_print_filename(name, name_len, new, enc_name);
1432 if (c.show_file_map) {
1433 struct f2fs_dentry *d = fsck->dentry;
1435 if (dentry[idx].file_type != F2FS_FT_REG_FILE)
1440 printf("/%s", d->name);
1444 if (dump_node(sbi, le32_to_cpu(dentry[idx].ino), 0))
1447 for (i = 1; i < depth; i++)
1448 printf("%c ", tree_mark[i]);
1450 printf("%c-- %s <ino = 0x%x>, <encrypted (%d)>\n",
1451 last_de ? '`' : '|',
1452 new, le32_to_cpu(dentry[idx].ino),
/*
 * f2fs_check_hash_code - verify (and repair) a dentry's stored hash.
 *
 * Recomputes the name hash and, on mismatch, overwrites the on-disk
 * value in @dentry. Returns nonzero when a fix was made — TODO confirm,
 * the return statements are elided in this view.
 */
static int f2fs_check_hash_code(int encoding, int casefolded,
		struct f2fs_dir_entry *dentry,
		const unsigned char *name, u32 len, int enc_name)
	/* Casefolded encrypted names require a key to compute siphash,
	 * so they cannot be verified here. */
	if (enc_name && casefolded)
	f2fs_hash_t hash_code = f2fs_dentry_hash(encoding, casefolded, name, len);
	/* fix hash_code made by old buggy code */
	if (dentry->hash_code != hash_code) {
		char new[F2FS_PRINT_NAMELEN];
		pretty_print_filename(name, len, new, enc_name);
		FIX_MSG("Mismatch hash_code for \"%s\" [%x:%x]",
				new, le32_to_cpu(dentry->hash_code),
		dentry->hash_code = cpu_to_le32(hash_code);
/*
 * __get_current_level - map a directory page offset to its hash-tree
 * level by accumulating each level's block span (buckets * blocks per
 * bucket) until the offset falls inside it.
 */
static int __get_current_level(int dir_level, u32 pgofs)
	unsigned int bidx = 0;
	for (i = 0; i < MAX_DIR_HASH_DEPTH; i++) {
		bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
/*
 * f2fs_check_dirent_position - verify that a dentry lives in the
 * directory block range its hash dictates.
 *
 * Derives the hash-tree level from @pgofs, then computes the block
 * window [bidx, end_block) for the entry's hash bucket; the entry is
 * valid only if @pgofs lies inside that window.
 */
static int f2fs_check_dirent_position(const struct f2fs_dir_entry *dentry,
				      const char *printable_name,
				      u32 pgofs, u8 dir_level, u32 pino)
	unsigned int nbucket, nblock;
	unsigned int bidx, end_block;
	level = __get_current_level(dir_level, pgofs);
	nbucket = dir_buckets(level, dir_level);
	nblock = bucket_blocks(level);
	/* the hash selects the bucket; the bucket selects the block range */
	bidx = dir_block_index(level, dir_level,
			       le32_to_cpu(dentry->hash_code) % nbucket);
	end_block = bidx + nblock;
	if (pgofs >= bidx && pgofs < end_block)
	ASSERT_MSG("Wrong position of dirent pino:%u, name:%s, level:%d, "
		"dir_level:%d, pgofs:%u, correct range:[%u, %u]\n",
		pino, printable_name, level, dir_level, pgofs, bidx,
/*
 * __chk_dots_dentries - validate (and repair) the '.' and '..' entries
 * of a directory.
 *
 * '.'  must point at the directory itself (child->p_ino);
 * '..' must point at the parent (child->pp_ino), or at the root ino
 * when the directory's parent is the root. Wrong inode numbers are
 * rewritten in place. The stored hash code and NUL termination of the
 * name slot are also checked.
 */
static int __chk_dots_dentries(struct f2fs_sb_info *sbi,
			       struct f2fs_dir_entry *dentry,
			       struct child_info *child,
			       __u8 (*filename)[F2FS_SLOT_LEN],
	if ((name[0] == '.' && len == 1)) {
		if (le32_to_cpu(dentry->ino) != child->p_ino) {
			ASSERT_MSG("Bad inode number[0x%x] for '.', parent_ino is [0x%x]\n",
				le32_to_cpu(dentry->ino), child->p_ino);
			dentry->ino = cpu_to_le32(child->p_ino);
	if (name[0] == '.' && name[1] == '.' && len == 2) {
		/* root's '..' points back at root itself */
		if (child->p_ino == F2FS_ROOT_INO(sbi)) {
			if (le32_to_cpu(dentry->ino) != F2FS_ROOT_INO(sbi)) {
				ASSERT_MSG("Bad inode number[0x%x] for '..'\n",
					le32_to_cpu(dentry->ino));
				dentry->ino = cpu_to_le32(F2FS_ROOT_INO(sbi));
		} else if (le32_to_cpu(dentry->ino) != child->pp_ino) {
			ASSERT_MSG("Bad inode number[0x%x] for '..', parent parent ino is [0x%x]\n",
				le32_to_cpu(dentry->ino), child->pp_ino);
			dentry->ino = cpu_to_le32(child->pp_ino);
	if (f2fs_check_hash_code(get_encoding(sbi), casefolded, dentry, name, len, enc_name))
	/* the slot should be NUL-terminated after the dot name */
	if (name[len] != '\0') {
		ASSERT_MSG("'.' is not NULL terminated\n");
		memcpy(*filename, name, len);
1568 static void nullify_dentry(struct f2fs_dir_entry *dentry, int offs,
1569 __u8 (*filename)[F2FS_SLOT_LEN], u8 **bitmap)
1571 memset(dentry, 0, sizeof(struct f2fs_dir_entry));
1572 test_and_clear_bit_le(offs, *bitmap);
1573 memset(*filename, 0, F2FS_SLOT_LEN);
/*
 * __chk_dentries - core directory-entry scan shared by block and inline
 * dentry checkers.
 *
 * First pass issues readahead for every referenced inode block; second
 * pass validates each in-use entry (valid ino/nid, sane file_type and
 * name_len, dot-entry rules, stored hash, hash-bucket position) and
 * recursively checks the target inode. Entries that fail are cleared
 * from the bitmap when fixing is enabled.
 * Returns the number of good dentries, or -1 when anything was fixed
 * (so the caller writes the block back).
 */
static int __chk_dentries(struct f2fs_sb_info *sbi, int casefolded,
		struct child_info *child,
		u8 *bitmap, struct f2fs_dir_entry *dentry,
		__u8 (*filenames)[F2FS_SLOT_LEN],
		int max, int last_blk, int enc_name)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	enum FILE_TYPE ftype;
	struct f2fs_compr_blk_cnt cbc;
	char en[F2FS_PRINT_NAMELEN];
	/* readahead inode blocks */
	for (i = 0; i < max; i++) {
		if (test_bit_le(i, bitmap) == 0)
		ino = le32_to_cpu(dentry[i].ino);
		if (IS_VALID_NID(sbi, ino)) {
			struct node_info ni;
			get_node_info(sbi, ino, &ni);
			if (IS_VALID_BLK_ADDR(sbi, ni.blk_addr)) {
				dev_reada_block(ni.blk_addr);
				name_len = le16_to_cpu(dentry[i].name_len);
				/* skip the extra slots a long name occupies */
				i += (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN - 1;
	/* main validation pass; i advances by slot count per entry */
	for (i = 0; i < max;) {
		if (test_bit_le(i, bitmap) == 0) {
		if (!IS_VALID_NID(sbi, le32_to_cpu(dentry[i].ino))) {
			ASSERT_MSG("Bad dentry 0x%x with invalid NID/ino 0x%x",
					i, le32_to_cpu(dentry[i].ino));
				FIX_MSG("Clear bad dentry 0x%x with bad ino 0x%x",
					i, le32_to_cpu(dentry[i].ino));
				test_and_clear_bit_le(i, bitmap);
		ftype = dentry[i].file_type;
		if ((ftype <= F2FS_FT_UNKNOWN || ftype > F2FS_FT_LAST_FILE_TYPE)) {
			ASSERT_MSG("Bad dentry 0x%x with unexpected ftype 0x%x",
					le32_to_cpu(dentry[i].ino), ftype);
				FIX_MSG("Clear bad dentry 0x%x with bad ftype 0x%x",
				test_and_clear_bit_le(i, bitmap);
		name_len = le16_to_cpu(dentry[i].name_len);
		if (name_len == 0 || name_len > F2FS_NAME_LEN) {
			ASSERT_MSG("Bad dentry 0x%x with invalid name_len", i);
				FIX_MSG("Clear bad dentry 0x%x", i);
				test_and_clear_bit_le(i, bitmap);
		/* copy the name into a NUL-terminated scratch buffer */
		name = calloc(name_len + 1, 1);
		memcpy(name, filenames[i], name_len);
		slots = (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
		/* Be careful: 'dentry.file_type' is not imode. */
		if (ftype == F2FS_FT_DIR) {
			if ((name[0] == '.' && name_len == 1) ||
					(name[0] == '.' && name[1] == '.' &&
				ret = __chk_dots_dentries(sbi, casefolded, &dentry[i],
					child, name, name_len, &filenames[i],
			/* a directory may hold at most one '.' and one '..' */
			if (child->dots > 2) {
				ASSERT_MSG("More than one '.' or '..', should delete the extra one\n");
				nullify_dentry(&dentry[i], i,
					       &filenames[i], &bitmap);
		if (f2fs_check_hash_code(get_encoding(sbi), casefolded, dentry + i, name, name_len, enc_name))
		pretty_print_filename(name, name_len, en, enc_name);
		/* position check only applies to full dentry blocks */
		if (max == NR_DENTRY_IN_BLOCK) {
			ret = f2fs_check_dirent_position(dentry + i, en,
					child->pgofs, child->dir_level,
				FIX_MSG("Clear bad dentry 0x%x", i);
				test_and_clear_bit_le(i, bitmap);
		DBG(1, "[%3u]-[0x%x] name[%s] len[0x%x] ino[0x%x] type[0x%x]\n",
				fsck->dentry_depth, i, en, name_len,
				le32_to_cpu(dentry[i].ino),
				dentry[i].file_type);
		print_dentry(sbi, name, bitmap,
				dentry, max, i, last_blk, enc_name);
		/* recursively check the inode this entry points at */
		cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
		child->i_namelen = name_len;
		ret = fsck_chk_node_blk(sbi,
				NULL, le32_to_cpu(dentry[i].ino),
				ftype, TYPE_INODE, &blk_cnt, &cbc, child);
		if (ret && c.fix_on) {
			/* unlink: clear every slot the entry occupied */
			for (j = 0; j < slots; j++)
				test_and_clear_bit_le(i + j, bitmap);
			FIX_MSG("Unlink [0x%x] - %s len[0x%x], type[0x%x]",
					le32_to_cpu(dentry[i].ino),
					dentry[i].file_type);
		} else if (ret == 0) {
			if (ftype == F2FS_FT_DIR)
	/* -1 tells the caller the block was modified and must be rewritten */
	return fixed ? -1 : dentries;
/*
 * fsck_chk_inline_dentries - check the dentries stored inline in a
 * directory inode.
 *
 * Pushes a new node onto the fsck dentry-path list (used by
 * print_dentry/show_file_map to reconstruct full paths), runs
 * __chk_dentries over the inline area, then pops the list node and
 * restores dentry_depth.
 */
int fsck_chk_inline_dentries(struct f2fs_sb_info *sbi,
		struct f2fs_node *node_blk, struct child_info *child)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_dentry *cur_dentry = fsck->dentry_end;
	struct f2fs_dentry *new_dentry;
	struct f2fs_dentry_ptr d;
	void *inline_dentry;
	inline_dentry = inline_data_addr(node_blk);
	ASSERT(inline_dentry != NULL);
	make_dentry_ptr(&d, node_blk, inline_dentry, 2);
	/* extend the current directory path with this directory's name */
	fsck->dentry_depth++;
	new_dentry = calloc(sizeof(struct f2fs_dentry), 1);
	ASSERT(new_dentry != NULL);
	new_dentry->depth = fsck->dentry_depth;
	memcpy(new_dentry->name, child->p_name, F2FS_NAME_LEN);
	cur_dentry->next = new_dentry;
	fsck->dentry_end = new_dentry;
	/* inline dir is always the "last block" of itself */
	dentries = __chk_dentries(sbi, IS_CASEFOLDED(&node_blk->i), child,
			d.bitmap, d.dentry, d.filename, d.max, 1,
			file_is_encrypt(&node_blk->i));// pass through
	DBG(1, "[%3d] Inline Dentry Block Fixed hash_codes\n\n",
		fsck->dentry_depth);
	DBG(1, "[%3d] Inline Dentry Block Done : "
			"dentries:%d in %d slots (len:%d)\n\n",
			fsck->dentry_depth, dentries,
			d.max, F2FS_NAME_LEN);
	/* unwind the path list entry added above */
	fsck->dentry = cur_dentry;
	fsck->dentry_end = cur_dentry;
	cur_dentry->next = NULL;
	fsck->dentry_depth--;
/*
 * fsck_chk_dentry_blk - read one on-disk dentry block and check all of
 * its entries; if __chk_dentries reports fixes (negative return), the
 * repaired block is written back.
 *
 * Mirrors fsck_chk_inline_dentries: pushes/pops a dentry-path list
 * node around the scan so path printing works.
 */
int fsck_chk_dentry_blk(struct f2fs_sb_info *sbi, int casefolded, u32 blk_addr,
		struct child_info *child, int last_blk, int enc_name)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_dentry_block *de_blk;
	struct f2fs_dentry *cur_dentry = fsck->dentry_end;
	struct f2fs_dentry *new_dentry;
	de_blk = (struct f2fs_dentry_block *)calloc(BLOCK_SZ, 1);
	ASSERT(de_blk != NULL);
	ret = dev_read_block(de_blk, blk_addr);
	/* extend the current directory path with this directory's name */
	fsck->dentry_depth++;
	new_dentry = calloc(sizeof(struct f2fs_dentry), 1);
	ASSERT(new_dentry != NULL);
	new_dentry->depth = fsck->dentry_depth;
	memcpy(new_dentry->name, child->p_name, F2FS_NAME_LEN);
	cur_dentry->next = new_dentry;
	fsck->dentry_end = new_dentry;
	dentries = __chk_dentries(sbi, casefolded, child,
			de_blk->dentry_bitmap,
			de_blk->dentry, de_blk->filename,
			NR_DENTRY_IN_BLOCK, last_blk, enc_name);
	/* negative result means entries were fixed: persist the block */
	if (dentries < 0 && f2fs_dev_is_writable()) {
		ret = dev_write_block(de_blk, blk_addr);
		DBG(1, "[%3d] Dentry Block [0x%x] Fixed hash_codes\n\n",
			fsck->dentry_depth, blk_addr);
		DBG(1, "[%3d] Dentry Block [0x%x] Done : "
				"dentries:%d in %d slots (len:%d)\n\n",
				fsck->dentry_depth, blk_addr, dentries,
				NR_DENTRY_IN_BLOCK, F2FS_NAME_LEN);
	/* unwind the path list entry added above */
	fsck->dentry = cur_dentry;
	fsck->dentry_end = cur_dentry;
	cur_dentry->next = NULL;
	fsck->dentry_depth--;
/*
 * fsck_chk_data_blk - validate one data block address reached from a
 * node, account it in the fsck counters/bitmaps, and — for directory
 * data — descend into the dentry block.
 */
int fsck_chk_data_blk(struct f2fs_sb_info *sbi, int casefolded,
		u32 blk_addr, struct child_info *child, int last_blk,
		enum FILE_TYPE ftype, u32 parent_nid, u16 idx_in_node, u8 ver,
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	/* Is it reserved block? */
	if (blk_addr == NEW_ADDR) {
		/* reserved (preallocated, not yet written) still counts */
		fsck->chk.valid_blk_cnt++;
	if (!IS_VALID_BLK_ADDR(sbi, blk_addr)) {
		ASSERT_MSG("blkaddress is not valid. [0x%x]", blk_addr);
	/* cross-check against the SSA summary for this block */
	if (is_valid_ssa_data_blk(sbi, blk_addr, parent_nid,
				idx_in_node, ver)) {
		ASSERT_MSG("summary data block is not valid. [0x%x]",
	/* the block must be marked in-use by SIT ... */
	if (f2fs_test_sit_bitmap(sbi, blk_addr) == 0)
		ASSERT_MSG("SIT bitmap is 0x0. blk_addr[0x%x]", blk_addr);
	/* ... and must not have been claimed by another owner already */
	if (f2fs_test_main_bitmap(sbi, blk_addr) != 0)
		ASSERT_MSG("Duplicated data [0x%x]. pnid[0x%x] idx[0x%x]",
				blk_addr, parent_nid, idx_in_node);
	fsck->chk.valid_blk_cnt++;
	if (ftype == F2FS_FT_DIR) {
		f2fs_set_main_bitmap(sbi, blk_addr, CURSEG_HOT_DATA);
		return fsck_chk_dentry_blk(sbi, casefolded, blk_addr, child,
				last_blk, enc_name);
	f2fs_set_main_bitmap(sbi, blk_addr, CURSEG_WARM_DATA);
/*
 * fsck_chk_orphan_node - check every inode recorded in the checkpoint's
 * orphan blocks.
 *
 * Each orphan ino is checked via fsck_chk_node_blk(); good entries are
 * copied into new_blk so that, when fixing, a compacted orphan block
 * (bad entries dropped) can be written back in place.
 */
int fsck_chk_orphan_node(struct f2fs_sb_info *sbi)
	struct f2fs_compr_blk_cnt cbc = {0, CHEADER_PGOFS_NONE};
	block_t start_blk, orphan_blkaddr, i, j;
	struct f2fs_orphan_block *orphan_blk, *new_blk;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	/* nothing to do unless the checkpoint says orphans exist */
	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
	/* orphan blocks sit between the CP payload and the summary blocks */
	start_blk = __start_cp_addr(sbi) + 1 + get_sb(cp_payload);
	orphan_blkaddr = __start_sum_addr(sbi) - 1 - get_sb(cp_payload);
	f2fs_ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
	orphan_blk = calloc(BLOCK_SZ, 1);
	new_blk = calloc(BLOCK_SZ, 1);
	for (i = 0; i < orphan_blkaddr; i++) {
		int ret = dev_read_block(orphan_blk, start_blk + i);
		u32 new_entry_count = 0;
		entry_count = le32_to_cpu(orphan_blk->entry_count);
		for (j = 0; j < entry_count; j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			DBG(1, "[%3d] ino [0x%x]\n", i, ino);
			struct node_info ni;
			cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
			/* preen mode 1 without fixing: quick validity probe */
			if (c.preen_mode == PREEN_MODE_1 && !c.fix_on) {
				get_node_info(sbi, ino, &ni);
				if (!IS_VALID_NID(sbi, ino) ||
						!IS_VALID_BLK_ADDR(sbi, ni.blk_addr)) {
			ret = fsck_chk_node_blk(sbi, NULL, ino,
					F2FS_FT_ORPHAN, TYPE_INODE, &blk_cnt,
				/* good orphan: keep it in the rewritten block */
				new_blk->ino[new_entry_count++] =
			else if (ret && c.fix_on)
				FIX_MSG("[0x%x] remove from orphan list", ino);
				ASSERT_MSG("[0x%x] wrong orphan inode", ino);
		/* write back only if entries were actually dropped */
		if (f2fs_dev_is_writable() && c.fix_on &&
				entry_count != new_entry_count) {
			new_blk->entry_count = cpu_to_le32(new_entry_count);
			ret = dev_write_block(new_blk, start_blk + i);
		memset(orphan_blk, 0, BLOCK_SZ);
		memset(new_blk, 0, BLOCK_SZ);
/*
 * fsck_chk_quota_node - sanity-check each quota inode recorded in the
 * superblock; a broken quota inode is marked QF_SZCHK_ERR and rebuilt.
 */
int fsck_chk_quota_node(struct f2fs_sb_info *sbi)
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	enum quota_type qtype;
	struct f2fs_compr_blk_cnt cbc = {0, CHEADER_PGOFS_NONE};
	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
		/* skip quota types that were never provisioned */
		if (sb->qf_ino[qtype] == 0)
		nid_t ino = QUOTA_INO(sb, qtype);
		struct node_info ni;
		DBG(1, "qtype [%d] ino [0x%x]\n", qtype, ino);
		cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
		/* preen mode 1 without fixing: quick validity probe */
		if (c.preen_mode == PREEN_MODE_1 && !c.fix_on) {
			get_node_info(sbi, ino, &ni);
			if (!IS_VALID_NID(sbi, ino) ||
					!IS_VALID_BLK_ADDR(sbi, ni.blk_addr))
		ret = fsck_chk_node_blk(sbi, NULL, ino,
				F2FS_FT_REG_FILE, TYPE_INODE, &blk_cnt,
			ASSERT_MSG("wrong quota inode, qtype [%d] ino [0x%x]",
			/* disable size check and recreate the inode */
			qf_szchk_type[qtype] = QF_SZCHK_ERR;
			f2fs_rebuild_qf_inode(sbi, qtype);
/*
 * fsck_chk_quota_files - compare each quota file's contents with the
 * usage fsck computed, and rewrite the quota file when fixing is on.
 */
int fsck_chk_quota_files(struct f2fs_sb_info *sbi)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	enum quota_type qtype;
	/* Return if quota feature is disabled */
	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
		ino = sb->qf_ino[qtype];
		DBG(1, "Checking Quota file ([%3d] ino [0x%x])\n", qtype, ino);
		ret = quota_compare_and_update(sbi, qtype, &needs_writeout,
		if (ret == 0 && needs_writeout == 0) {
		/* Something is wrong */
			DBG(0, "Fixing Quota file ([%3d] ino [0x%x])\n",
			/* refresh i_size before rewriting the quota data */
			f2fs_filesize_update(sbi, ino, 0);
			ret = quota_write_inode(sbi, qtype);
				c.quota_fixed = true;
				ASSERT_MSG("Unable to write quota file");
			ASSERT_MSG("Quota file is missing or invalid"
					" quota file content found.");
/*
 * fsck_chk_meta - cross-check the filesystem's metadata structures
 * (SIT, NAT, checkpoint, orphan and quota inodes) against one another
 * before the main tree walk. Any mismatch is reported; nonzero return
 * presumably aborts/fails the meta check (returns elided in this view).
 */
int fsck_chk_meta(struct f2fs_sb_info *sbi)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct seg_entry *se;
	unsigned int sit_valid_segs = 0, sit_node_blks = 0;
	/* 1. check sit usage with CP: curseg is lost? */
	for (i = 0; i < MAIN_SEGS(sbi); i++) {
		se = get_seg_entry(sbi, i);
		if (se->valid_blocks != 0)
		else if (IS_CUR_SEGNO(sbi, i)) {
			/* curseg has not been written back to device */
			MSG(1, "\tInfo: curseg %u is counted in valid segs\n", i);
		if (IS_NODESEG(se->type))
			sit_node_blks += se->valid_blocks;
	if (fsck->chk.sit_free_segs + sit_valid_segs !=
			get_usable_seg_count(sbi)) {
		ASSERT_MSG("SIT usage does not match: sit_free_segs %u, "
				"sit_valid_segs %u, total_segs %u",
			fsck->chk.sit_free_segs, sit_valid_segs,
			get_usable_seg_count(sbi));
	/* 2. check node count */
	if (fsck->chk.valid_nat_entry_cnt != sit_node_blks) {
		ASSERT_MSG("node count does not match: valid_nat_entry_cnt %u,"
			" sit_node_blks %u",
			fsck->chk.valid_nat_entry_cnt, sit_node_blks);
	/* 3. check SIT with CP */
	if (fsck->chk.sit_free_segs != le32_to_cpu(cp->free_segment_count)) {
		ASSERT_MSG("free segs does not match: sit_free_segs %u, "
			"free_segment_count %u",
			fsck->chk.sit_free_segs,
			le32_to_cpu(cp->free_segment_count));
	/* 4. check NAT with CP */
	if (fsck->chk.valid_nat_entry_cnt !=
			le32_to_cpu(cp->valid_node_count)) {
		ASSERT_MSG("valid node does not match: valid_nat_entry_cnt %u,"
			" valid_node_count %u",
			fsck->chk.valid_nat_entry_cnt,
			le32_to_cpu(cp->valid_node_count));
	/* 5. check orphan inode simply */
	if (fsck_chk_orphan_node(sbi))
	/* 6. check nat entry -- must be done before quota check */
	for (i = 0; i < fsck->nr_nat_entries; i++) {
		u32 blk = le32_to_cpu(fsck->entries[i].block_addr);
		nid_t ino = le32_to_cpu(fsck->entries[i].ino);
		/*
		 * skip entry whose ino is 0, otherwise, we will
		 * get a negative number by BLKOFF_FROM_MAIN(sbi, blk)
		 */
		if (!IS_VALID_BLK_ADDR(sbi, blk)) {
			MSG(0, "\tError: nat entry[ino %u block_addr 0x%x]"
		if (!f2fs_test_sit_bitmap(sbi, blk)) {
			MSG(0, "\tError: nat entry[ino %u block_addr 0x%x]"
				" not find it in sit_area_bitmap\n",
		if (!IS_VALID_NID(sbi, ino)) {
			MSG(0, "\tError: nat_entry->ino %u exceeds the range"
				" of nat entries %u\n",
				ino, fsck->nr_nat_entries);
		if (!f2fs_test_bit(ino, fsck->nat_area_bitmap)) {
			MSG(0, "\tError: nat_entry->ino %u is not set in"
				" nat_area_bitmap\n", ino);
	/* 7. check quota inode simply */
	if (fsck_chk_quota_node(sbi))
	if (fsck->nat_valid_inode_cnt != le32_to_cpu(cp->valid_inode_count)) {
		ASSERT_MSG("valid inode does not match: nat_valid_inode_cnt %u,"
			" valid_inode_count %u",
			fsck->nat_valid_inode_cnt,
			le32_to_cpu(cp->valid_inode_count));
/*
 * fsck_chk_checkpoint - validate checkpoint layout invariants; with the
 * large_nat_bitmap flag set, the checksum must sit at the minimal
 * offset (the old layout is deprecated).
 */
void fsck_chk_checkpoint(struct f2fs_sb_info *sbi)
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	if (get_cp(ckpt_flags) & CP_LARGE_NAT_BITMAP_FLAG) {
		if (get_cp(checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
			ASSERT_MSG("Deprecated layout of large_nat_bitmap, "
				"chksum_offset:%u", get_cp(checksum_offset));
/*
 * fsck_init - allocate and populate the working state fsck needs:
 * the three consistency bitmaps, the dentry-path tree marks, and the
 * root of the dentry-path list.
 */
void fsck_init(struct f2fs_sb_info *sbi)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_sm_info *sm_i = SM_I(sbi);
	/*
	 * We build three bitmaps for main/sit/nat so that we may check
	 * consistency:
	 * 1. main_area_bitmap will be used to check whether each block of
	 *    the main area is used or not.
	 * 2. nat_area_bitmap has bitmap information of used nids in NAT.
	 * 3. sit_area_bitmap has bitmap information of used main blocks.
	 * In the last step, main_area_bitmap is compared with
	 * sit_area_bitmap.
	 */
	fsck->nr_main_blks = sm_i->main_segments << sbi->log_blocks_per_seg;
	fsck->main_area_bitmap_sz = (fsck->nr_main_blks + 7) / 8;
	fsck->main_area_bitmap = calloc(fsck->main_area_bitmap_sz, 1);
	ASSERT(fsck->main_area_bitmap != NULL);
	build_nat_area_bitmap(sbi);
	build_sit_area_bitmap(sbi);
	/* scratch array used by print_dentry() for tree drawing */
	ASSERT(tree_mark_size != 0);
	tree_mark = calloc(tree_mark_size, 1);
	ASSERT(tree_mark != NULL);
	/* root of the dentry-path list: "/" */
	fsck->dentry = calloc(sizeof(struct f2fs_dentry), 1);
	ASSERT(fsck->dentry != NULL);
	memcpy(fsck->dentry->name, "/", 1);
	fsck->dentry_end = fsck->dentry;
	c.quota_fixed = false;
/*
 * fix_hard_links - rewrite i_links for every inode collected in the
 * hard-link list whose recorded link count disagreed with the number
 * of links fsck actually found.
 */
static void fix_hard_links(struct f2fs_sb_info *sbi)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct hard_link_node *tmp, *node;
	struct f2fs_node *node_blk = NULL;
	struct node_info ni;
	/* nothing collected during the walk: nothing to fix */
	if (fsck->hard_link_list_head == NULL)
	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
	ASSERT(node_blk != NULL);
	node = fsck->hard_link_list_head;
	if (sanity_check_nid(sbi, node->nid, node_blk,
				F2FS_FT_MAX, TYPE_INODE, &ni))
		FIX_MSG("Failed to fix, rerun fsck.f2fs");
	/* overwrite the stored count with the observed one */
	node_blk->i.i_links = cpu_to_le32(node->actual_links);
	FIX_MSG("File: 0x%x i_links= 0x%x -> 0x%x",
			node->nid, node->links, node->actual_links);
	ret = dev_write_block(node_blk, ni.blk_addr);
2256 static void fix_nat_entries(struct f2fs_sb_info *sbi)
2258 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2261 for (i = 0; i < fsck->nr_nat_entries; i++)
2262 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
2263 nullify_nat_entry(sbi, i);
/*
 * flush_curseg_sit_entries - rewrite the SIT entry of every current
 * segment so that on-disk SIT reflects any segment-type changes made
 * during fixing.
 */
static void flush_curseg_sit_entries(struct f2fs_sb_info *sbi)
	struct sit_info *sit_i = SIT_I(sbi);
	struct f2fs_sit_block *sit_blk;
	sit_blk = calloc(BLOCK_SZ, 1);
	/* update curseg sit entries, since we may change
	 * a segment type in move_curseg_info
	 */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		struct f2fs_sit_entry *sit;
		struct seg_entry *se;
		se = get_seg_entry(sbi, curseg->segno);
		get_current_sit_page(sbi, curseg->segno, sit_blk);
		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, curseg->segno)];
		/* re-pack type into the high bits of vblocks */
		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
		rewrite_current_sit_page(sbi, curseg->segno, sit_blk);
/*
 * fix_checksum - copy the in-memory NAT and SIT version bitmaps back
 * into the checkpoint's sit_nat_version_bitmap area (NAT first, SIT
 * immediately after), skipping the leading __le32.
 */
static void fix_checksum(struct f2fs_sb_info *sbi)
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sit_info *sit_i = SIT_I(sbi);
	void *bitmap_offset;
	/* note: arithmetic on void* relies on the GNU extension */
	bitmap_offset = cp->sit_nat_version_bitmap + sizeof(__le32);
	memcpy(bitmap_offset, nm_i->nat_bitmap, nm_i->bitmap_size);
	memcpy(bitmap_offset + nm_i->bitmap_size,
			sit_i->sit_bitmap, sit_i->bitmap_size);
/*
 * fix_checkpoint - assemble and write a repaired checkpoint pack.
 *
 * Rebuilds the checkpoint flags (preserving orphan/trimmed/disabled/
 * large-NAT state), refreshes the free/valid counters from the fsck
 * results, recomputes the checksum, then writes the pack in order:
 * header block, cp_payload blocks, (skipped) orphan blocks, current
 * segment summaries, optional NAT bits, and finally the trailing copy
 * of the header — with fsyncs bracketing the final commit block.
 */
static void fix_checkpoint(struct f2fs_sb_info *sbi)
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned long long cp_blk_no;
	/* allocation failure during fix means the fs still needs fsck */
	u32 flags = c.alloc_failed ? CP_FSCK_FLAG: CP_UMOUNT_FLAG;
	block_t orphan_blks = 0;
	/* should call from fsck */
	ASSERT(c.func == FSCK);
	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
		orphan_blks = __start_sum_addr(sbi) - 1;
		flags |= CP_ORPHAN_PRESENT_FLAG;
	if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
		flags |= CP_TRIMMED_FLAG;
	if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
		flags |= CP_DISABLED_FLAG;
	if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
		flags |= CP_LARGE_NAT_BITMAP_FLAG;
		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
		set_cp(checksum_offset, CP_CHKSUM_OFFSET);
	if (flags & CP_UMOUNT_FLAG)
	set_cp(cp_pack_total_block_count, cp_blocks +
				orphan_blks + get_sb(cp_payload));
	flags = update_nat_bits_flags(sb, cp, flags);
	flags |= CP_NOCRC_RECOVERY_FLAG;
	set_cp(ckpt_flags, flags);
	/* publish the counters fsck computed during the walk */
	set_cp(free_segment_count, get_free_segments(sbi));
	set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
	set_cp(valid_node_count, fsck->chk.valid_node_cnt);
	set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
	/* checksum is stored inside the checkpoint at checksum_offset */
	crc = f2fs_checkpoint_chksum(cp);
	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
	cp_blk_no = get_sb(cp_blkaddr);
	/* the second CP pack lives one segment after the first */
	if (sbi->cur_cp == 2)
		cp_blk_no += 1 << get_sb(log_blocks_per_seg);
	ret = dev_write_block(cp, cp_blk_no++);
	for (i = 0; i < get_sb(cp_payload); i++) {
		ret = dev_write_block(((unsigned char *)cp) +
					(i + 1) * F2FS_BLKSIZE, cp_blk_no++);
	/* orphan blocks were already handled; skip over their slots */
	cp_blk_no += orphan_blks;
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		/* node summaries are omitted unless this is a clean umount */
		if (!(flags & CP_UMOUNT_FLAG) && IS_NODESEG(i))
		ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
	/* Write nat bits */
	if (flags & CP_NAT_BITS_FLAG)
		write_nat_bits(sbi, sb, cp, sbi->cur_cp);
	/* flush everything before committing the trailing CP copy */
	ret = f2fs_fsync_device();
	ret = dev_write_block(cp, cp_blk_no++);
	ret = f2fs_fsync_device();
/*
 * fix_checkpoints - repair both checkpoint packs: duplicate the valid
 * one over its mirror, then rewrite the pack at position #0.
 */
static void fix_checkpoints(struct f2fs_sb_info *sbi)
	/* copy valid checkpoint to its mirror position */
	duplicate_checkpoint(sbi);
	/* repair checkpoint at CP #0 position */
	fix_checkpoint(sbi);
#ifdef HAVE_LINUX_BLKZONED_H
/*
 * Refer valid block map and return offset of the last valid block in the zone.
 * Obtain valid block map from SIT and fsync data.
 * If there is no valid block in the zone, return -1.
 */
static int last_vblk_off_in_zone(struct f2fs_sb_info *sbi,
				unsigned int zone_segno)
	unsigned int segs_per_zone = sbi->segs_per_sec * sbi->secs_per_zone;
	struct seg_entry *se;
	/* scan segments from the zone's end backwards, blocks likewise */
	for (s = segs_per_zone - 1; s >= 0; s--) {
		se = get_seg_entry(sbi, zone_segno + s);
		/*
		 * Refer not cur_valid_map but ckpt_valid_map which reflects
		 */
		ASSERT(se->ckpt_valid_map);
		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
			if (f2fs_test_bit(b, (const char*)se->ckpt_valid_map))
				/* zone-relative block offset */
				return b + (s << sbi->log_blocks_per_seg);
/*
 * check_curseg_write_pointer - on a zoned device, compare a current
 * segment's allocation position against the hardware write pointer of
 * the zone it lives in.
 *
 * Consistent (curseg == wp) is fine. curseg beyond wp is an
 * inconsistency and is counted. wp beyond curseg is fine only if the
 * gap holds fsync data the kernel will recover; otherwise it is also
 * counted as an inconsistent zone.
 */
static int check_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct blk_zone blkz;
	block_t cs_block, wp_block, zone_last_vblock;
	uint64_t cs_sector, wp_sector;
	unsigned int zone_segno;
	int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
	/* get the device the curseg points to */
	cs_block = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;
	for (i = 0; i < MAX_DEVICES; i++) {
		if (!c.devices[i].path)
		if (c.devices[i].start_blkaddr <= cs_block &&
				cs_block <= c.devices[i].end_blkaddr)
	if (i >= MAX_DEVICES)
	/* get write pointer position of the zone the curseg points to */
	cs_sector = (cs_block - c.devices[i].start_blkaddr)
		<< log_sectors_per_block;
	ret = f2fs_report_zone(i, cs_sector, &blkz);
	/* only sequential-write-required zones carry a write pointer */
	if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
	/* check consistency between the curseg and the write pointer */
	wp_block = c.devices[i].start_blkaddr +
		(blk_zone_wp_sector(&blkz) >> log_sectors_per_block);
	wp_sector = blk_zone_wp_sector(&blkz);
	if (cs_sector == wp_sector)
	if (cs_sector > wp_sector) {
		/* curseg ahead of the device wp: definitely inconsistent */
		MSG(0, "Inconsistent write pointer with curseg %d: "
		       "curseg %d[0x%x,0x%x] > wp[0x%x,0x%x]\n",
		       type, type, curseg->segno, curseg->next_blkoff,
		       GET_SEGNO(sbi, wp_block), OFFSET_IN_SEG(sbi, wp_block));
		fsck->chk.wp_inconsistent_zones++;
	MSG(0, "Write pointer goes advance from curseg %d: "
	       "curseg %d[0x%x,0x%x] wp[0x%x,0x%x]\n",
	       type, type, curseg->segno, curseg->next_blkoff,
	       GET_SEGNO(sbi, wp_block), OFFSET_IN_SEG(sbi, wp_block));
	zone_segno = GET_SEG_FROM_SEC(sbi,
				      GET_SEC_FROM_SEG(sbi, curseg->segno));
	zone_last_vblock = START_BLOCK(sbi, zone_segno) +
		last_vblk_off_in_zone(sbi, zone_segno);
	/*
	 * If valid blocks exist between the curseg position and the write
	 * pointer, they are fsync data. This is not an error to fix. Leave it
	 * for kernel to recover later.
	 * If valid blocks exist between the curseg's zone start and the curseg
	 * position, or if there is no valid block in the curseg's zone, fix
	 * the inconsistency between the curseg and the write pointer.
	 * Of note is that if there is no valid block in the curseg's zone,
	 * last_vblk_off_in_zone() returns -1 and zone_last_vblock is always
	 * smaller than cs_block.
	 */
	if (cs_block <= zone_last_vblock && zone_last_vblock < wp_block) {
		MSG(0, "Curseg has fsync data: curseg %d[0x%x,0x%x] "
		       "last valid block in zone[0x%x,0x%x]\n",
		       type, curseg->segno, curseg->next_blkoff,
		       GET_SEGNO(sbi, zone_last_vblock),
		       OFFSET_IN_SEG(sbi, zone_last_vblock));
	fsck->chk.wp_inconsistent_zones++;
/* presumably the non-zoned fallback (paired with the #ifdef above) */
static int check_curseg_write_pointer(struct f2fs_sb_info *UNUSED(sbi),
/*
 * check_curseg_offset - verify one current segment's next_blkoff:
 * it must be in range, point at a free block, and — for LFS
 * allocation — have no valid blocks after it. On HM-zoned devices the
 * write pointer is also verified.
 */
int check_curseg_offset(struct f2fs_sb_info *sbi, int type)
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct seg_entry *se;
	/* RO images only maintain the two HOT cursegs */
	if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO) &&
			type != CURSEG_HOT_DATA && type != CURSEG_HOT_NODE)
	/* offset must index into the segment's valid-block bitmap */
	if ((curseg->next_blkoff >> 3) >= SIT_VBLOCK_MAP_SIZE) {
		ASSERT_MSG("Next block offset:%u is invalid, type:%d",
			curseg->next_blkoff, type);
	se = get_seg_entry(sbi, curseg->segno);
	if (f2fs_test_bit(curseg->next_blkoff,
				(const char *)se->cur_valid_map)) {
		ASSERT_MSG("Next block offset is not free, type:%d", type);
	/* SSR reuses holes, so blocks after next_blkoff may be valid */
	if (curseg->alloc_type == SSR)
	nblocks = sbi->blocks_per_seg;
	for (j = curseg->next_blkoff + 1; j < nblocks; j++) {
		if (f2fs_test_bit(j, (const char *)se->cur_valid_map)) {
			ASSERT_MSG("For LFS curseg, space after .next_blkoff "
				"should be unused, type:%d", type);
	if (c.zoned_model == F2FS_ZONED_HM)
		return check_curseg_write_pointer(sbi, type);
/* check_curseg_offsets - run check_curseg_offset() over every curseg type. */
int check_curseg_offsets(struct f2fs_sb_info *sbi)
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		ret = check_curseg_offset(sbi, i);
/*
 * fix_curseg_info - move any current segment that fails the offset
 * check, then persist the updated curseg and SIT state if anything
 * changed.
 */
static void fix_curseg_info(struct f2fs_sb_info *sbi)
	int i, need_update = 0;
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		if (check_curseg_offset(sbi, i)) {
			update_curseg_info(sbi, i);
	write_curseg_info(sbi);
	flush_curseg_sit_entries(sbi);
/*
 * check_sit_types - compare each segment's effective type against its
 * original SIT type. A COLD_DATA segment demoted to a hotter data type
 * is silently restored; other mismatches are reported as fixes.
 */
int check_sit_types(struct f2fs_sb_info *sbi)
	for (i = 0; i < MAIN_SEGS(sbi); i++) {
		struct seg_entry *se;
		se = get_seg_entry(sbi, i);
		if (se->orig_type != se->type) {
			if (se->orig_type == CURSEG_COLD_DATA &&
					se->type <= CURSEG_COLD_DATA) {
				se->type = se->orig_type;
				FIX_MSG("Wrong segment type [0x%x] %x -> %x",
						i, se->orig_type, se->type);
/*
 * fsck_get_lpf - return the lost+found directory's node block, creating
 * the directory under root if it does not exist yet. Also records its
 * ino in c.lpf_ino for later reconnection work.
 */
static struct f2fs_node *fsck_get_lpf(struct f2fs_sb_info *sbi)
	struct f2fs_node *node;
	struct node_info ni;
	/* read root inode first */
	node = calloc(F2FS_BLKSIZE, 1);
	get_node_info(sbi, F2FS_ROOT_INO(sbi), &ni);
	err = dev_read_block(node, ni.blk_addr);
	/* lookup lost+found in root directory */
	lpf_ino = f2fs_lookup(sbi, node, (u8 *)LPF, strlen(LPF));
	if (lpf_ino) { /* found */
		get_node_info(sbi, lpf_ino, &ni);
		err = dev_read_block(node, ni.blk_addr);
		DBG(1, "Found lost+found 0x%x at blkaddr [0x%x]\n",
		    lpf_ino, ni.blk_addr);
		/* an existing lost+found must itself be a directory */
		if (!S_ISDIR(le16_to_cpu(node->i.i_mode))) {
			ASSERT_MSG("lost+found is not directory [0%o]\n",
				   le16_to_cpu(node->i.i_mode));
			/* FIXME: give up? */
	} else { /* not found, create it */
		memset(&de, 0, sizeof(de));
		de.name = (u8 *) LPF;
		de.len = strlen(LPF);
		/* NOTE(review): trailing commas below are comma operators,
		 * not typos affecting behavior — each line still assigns. */
		de.pino = F2FS_ROOT_INO(sbi),
		de.file_type = F2FS_FT_DIR,
		de.mtime = time(NULL);
		err = f2fs_mkdir(sbi, &de);
			ASSERT_MSG("Failed create lost+found");
		/* re-read the freshly created inode */
		get_node_info(sbi, de.ino, &ni);
		err = dev_read_block(node, ni.blk_addr);
		DBG(1, "Create lost+found 0x%x at blkaddr [0x%x]\n",
		    de.ino, ni.blk_addr);
	c.lpf_ino = le32_to_cpu(node->footer.ino);
/*
 * fsck_do_reconnect_file - link an unreachable inode into lost+found.
 *
 * The new name is the inode number rendered in decimal. Fails if a
 * same-named entry already exists or if f2fs_add_link() fails; on
 * success the inode's i_name/i_namelen/i_pino are updated and the node
 * block is written back.
 */
static int fsck_do_reconnect_file(struct f2fs_sb_info *sbi,
				  struct f2fs_node *lpf,
				  struct f2fs_node *fnode)
	nid_t ino = le32_to_cpu(fnode->footer.ino);
	struct node_info ni;
	namelen = snprintf(name, 80, "%u", ino);
	/* ignore terminating '\0', should never happen */
	if (f2fs_lookup(sbi, lpf, (u8 *)name, namelen)) {
		ASSERT_MSG("Name %s already exist in lost+found", name);
	get_node_info(sbi, le32_to_cpu(lpf->footer.ino), &ni);
	ftype = map_de_type(le16_to_cpu(fnode->i.i_mode));
	ret = f2fs_add_link(sbi, lpf, (unsigned char *)name, namelen,
			    ino, ftype, ni.blk_addr, 0);
		ASSERT_MSG("Failed to add inode [0x%x] to lost+found", ino);
	/* make the inode itself agree with its new name and parent */
	memcpy(fnode->i.i_name, name, namelen);
	fnode->i.i_namelen = cpu_to_le32(namelen);
	fnode->i.i_pino = c.lpf_ino;
	get_node_info(sbi, le32_to_cpu(fnode->footer.ino), &ni);
	ret = dev_write_block(fnode, ni.blk_addr);
	DBG(1, "Reconnect inode [0x%x] to lost+found\n", ino);
/*
 * fsck_failed_reconnect_file_dnode - undo the accounting done for one
 * direct node of a file whose reconnection failed: decrement the node
 * and block counters and clear the main-area bitmap bits for the node
 * block and every data block it addresses.
 */
static void fsck_failed_reconnect_file_dnode(struct f2fs_sb_info *sbi,
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct f2fs_node *node;
	struct node_info ni;
	node = calloc(F2FS_BLKSIZE, 1);
	get_node_info(sbi, nid, &ni);
	err = dev_read_block(node, ni.blk_addr);
	/* back out the node block itself */
	fsck->chk.valid_node_cnt--;
	fsck->chk.valid_blk_cnt--;
	f2fs_clear_main_bitmap(sbi, ni.blk_addr);
	/* back out every addressed data block */
	for (i = 0; i < ADDRS_PER_BLOCK(&node->i); i++) {
		addr = le32_to_cpu(node->dn.addr[i]);
		fsck->chk.valid_blk_cnt--;
		/* reserved blocks have no main-area bitmap bit to clear */
		if (addr == NEW_ADDR)
		f2fs_clear_main_bitmap(sbi, addr);
/*
 * Roll back fsck accounting for one indirect node block after a failed
 * reconnect: undo its own counters/bitmap bit, then recurse into each
 * direct node it references.
 */
2763 static void fsck_failed_reconnect_file_idnode(struct f2fs_sb_info *sbi,
2766 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2767 struct f2fs_node *node;
2768 struct node_info ni;
2772 node = calloc(F2FS_BLKSIZE, 1);
2775 get_node_info(sbi, nid, &ni);
2776 err = dev_read_block(node, ni.blk_addr);
/* Undo the accounting for the indirect node block itself */
2779 fsck->chk.valid_node_cnt--;
2780 fsck->chk.valid_blk_cnt--;
2781 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
/* Recurse into every referenced direct node */
2783 for (i = 0; i < NIDS_PER_BLOCK; i++) {
2784 tmp = le32_to_cpu(node->in.nid[i]);
2787 fsck_failed_reconnect_file_dnode(sbi, tmp);
/*
 * Roll back fsck accounting for one double-indirect node block after a
 * failed reconnect: undo its own counters/bitmap bit, then recurse into
 * each indirect node it references.
 */
2793 static void fsck_failed_reconnect_file_didnode(struct f2fs_sb_info *sbi,
2796 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2797 struct f2fs_node *node;
2798 struct node_info ni;
2802 node = calloc(F2FS_BLKSIZE, 1);
2805 get_node_info(sbi, nid, &ni);
2806 err = dev_read_block(node, ni.blk_addr);
/* Undo the accounting for the double-indirect node block itself */
2809 fsck->chk.valid_node_cnt--;
2810 fsck->chk.valid_blk_cnt--;
2811 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
/* Recurse into every referenced indirect node */
2813 for (i = 0; i < NIDS_PER_BLOCK; i++) {
2814 tmp = le32_to_cpu(node->in.nid[i]);
2817 fsck_failed_reconnect_file_idnode(sbi, tmp);
2824 * Counters and main_area_bitmap are already changed during checking
2825 * inode block, so clear them. There is no need to clear new blocks
2826 * allocated to lost+found.
/*
 * Full rollback for one inode whose reconnect to lost+found failed:
 * undoes the inode's own accounting, its xattr node, its in-inode data
 * block addresses, and (via the helpers above) its direct / indirect /
 * double-indirect node trees.
 */
2828 static void fsck_failed_reconnect_file(struct f2fs_sb_info *sbi, nid_t ino)
2830 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2831 struct f2fs_node *node;
2832 struct node_info ni;
2836 node = calloc(F2FS_BLKSIZE, 1);
2839 get_node_info(sbi, ino, &ni);
2840 err = dev_read_block(node, ni.blk_addr);
2843 /* clear inode counters */
2844 fsck->chk.valid_inode_cnt--;
2845 fsck->chk.valid_node_cnt--;
2846 fsck->chk.valid_blk_cnt--;
2847 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
2849 /* clear xnid counters */
2850 if (node->i.i_xattr_nid) {
2851 nid = le32_to_cpu(node->i.i_xattr_nid);
2852 fsck->chk.valid_node_cnt--;
2853 fsck->chk.valid_blk_cnt--;
2854 get_node_info(sbi, nid, &ni);
2855 f2fs_clear_main_bitmap(sbi, ni.blk_addr);
2858 /* clear data counters */
/* Inline-data inodes keep data inside the inode block — nothing to undo */
2859 if(!(node->i.i_inline & F2FS_INLINE_DATA)) {
2860 ofs = get_extra_isize(node);
2861 for (i = 0; i < ADDRS_PER_INODE(&node->i); i++) {
2862 block_t addr = le32_to_cpu(node->i.i_addr[ofs + i]);
2865 fsck->chk.valid_blk_cnt--;
/* NEW_ADDR marks a preallocated slot with no on-disk block to clear */
2866 if (addr == NEW_ADDR)
2868 f2fs_clear_main_bitmap(sbi, addr);
/*
 * Walk the five i_nid slots: 0-1 direct, 2-3 indirect, 4 double
 * indirect (per the F2FS inode layout).
 */
2872 for (i = 0; i < 5; i++) {
2873 nid = le32_to_cpu(node->i.i_nid[i]);
2878 case 0: /* direct node */
2880 fsck_failed_reconnect_file_dnode(sbi, nid);
2882 case 2: /* indirect node */
2884 fsck_failed_reconnect_file_idnode(sbi, nid);
2886 case 4: /* double indirect node */
2887 fsck_failed_reconnect_file_didnode(sbi, nid);
2896 * Scan unreachable nids and find only regular file inodes. If these files
2897 * are not corrupted, reconnect them to lost+found.
2899 * Since all unreachable nodes are already checked, we can allocate new
2902 * This function returns the number of files been reconnected.
2904 static int fsck_reconnect_file(struct f2fs_sb_info *sbi)
2906 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2907 struct f2fs_node *lpf_node, *node;
2908 struct node_info ni;
2909 char *reconnect_bitmap;
2911 struct f2fs_compr_blk_cnt cbc;
2913 int err, cnt = 0, ftype;
2915 node = calloc(F2FS_BLKSIZE, 1);
/* One bit per nid: set when the nid is a reconnect candidate */
2918 reconnect_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
2919 ASSERT(reconnect_bitmap);
/*
 * Pass 1: filter the unreachable nids (still set in nat_area_bitmap)
 * down to sane, non-directory, non-quota inode blocks, re-check each
 * inode, and mark survivors in reconnect_bitmap.
 */
2921 for (nid = 0; nid < fsck->nr_nat_entries; nid++) {
2922 if (f2fs_test_bit(nid, fsck->nat_area_bitmap)) {
2923 if (is_qf_ino(F2FS_RAW_SUPER(sbi), nid)) {
2924 DBG(1, "Not support quota inode [0x%x]\n",
2929 get_node_info(sbi, nid, &ni);
2930 err = dev_read_block(node, ni.blk_addr);
2933 /* reconnection will restore these nodes if needed */
/* Only inode blocks (footer.ino == footer.nid) are candidates */
2934 if (node->footer.ino != node->footer.nid) {
2935 DBG(1, "Not support non-inode node [0x%x]\n",
2940 if (S_ISDIR(le16_to_cpu(node->i.i_mode))) {
2941 DBG(1, "Not support directory inode [0x%x]\n",
2946 ftype = map_de_type(le16_to_cpu(node->i.i_mode));
2947 if (sanity_check_nid(sbi, nid, node, ftype,
2949 ASSERT_MSG("Invalid nid [0x%x]\n", nid);
2953 DBG(1, "Check inode 0x%x\n", nid);
2956 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
/* Re-run the full inode check; this also updates fsck counters */
2957 fsck_chk_inode_blk(sbi, nid, ftype, node,
2958 &blk_cnt, &cbc, &ni, NULL);
2960 f2fs_set_bit(nid, reconnect_bitmap);
/* Get (or create) the lost+found directory inode */
2964 lpf_node = fsck_get_lpf(sbi);
/*
 * Pass 2: link each candidate into lost+found; on failure roll back
 * the accounting done by the pass-1 inode check.
 */
2968 for (nid = 0; nid < fsck->nr_nat_entries; nid++) {
2969 if (f2fs_test_bit(nid, reconnect_bitmap)) {
2970 get_node_info(sbi, nid, &ni);
2971 err = dev_read_block(node, ni.blk_addr);
2974 if (fsck_do_reconnect_file(sbi, lpf_node, node)) {
2975 DBG(1, "Failed to reconnect inode [0x%x]\n",
2977 fsck_failed_reconnect_file(sbi, nid);
/* Keep quota usage in sync with the newly reconnected inode */
2981 quota_add_inode_usage(fsck->qctx, nid, &node->i);
2983 DBG(1, "Reconnected inode [0x%x] to lost+found\n", nid);
2991 free(reconnect_bitmap);
2995 #ifdef HAVE_LINUX_BLKZONED_H
/*
 * Context passed to the per-zone report callback below; also carries the
 * index of the device being scanned (dev_index, used as wpd->dev_index).
 */
2997 struct write_pointer_check_data {
2998 struct f2fs_sb_info *sbi;
/*
 * Per-zone callback for f2fs_report_zones(): compare a sequential zone's
 * hardware write pointer against SIT valid-block state and reset the
 * write pointer when the zone is empty but the pointer is not at the
 * zone start. Zones currently owned by a curseg are skipped.
 */
3002 static int chk_and_fix_wp_with_sit(int UNUSED(i), void *blkzone, void *opaque)
3004 struct blk_zone *blkz = (struct blk_zone *)blkzone;
3005 struct write_pointer_check_data *wpd = opaque;
3006 struct f2fs_sb_info *sbi = wpd->sbi;
3007 struct device_info *dev = c.devices + wpd->dev_index;
3008 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3009 block_t zone_block, wp_block, wp_blkoff;
3010 unsigned int zone_segno, wp_segno;
3011 struct curseg_info *cs;
3012 int cs_index, ret, last_valid_blkoff;
/* Sector-to-block shift: F2FS blocks are larger than 512B sectors */
3013 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
3014 unsigned int segs_per_zone = sbi->segs_per_sec * sbi->secs_per_zone;
/* Conventional (non-sequential) zones have no write pointer to check */
3016 if (blk_zone_conv(blkz))
/* Translate the zone's start sector into an F2FS block / segment */
3019 zone_block = dev->start_blkaddr
3020 + (blk_zone_sector(blkz) >> log_sectors_per_block);
3021 zone_segno = GET_SEGNO(sbi, zone_block);
/* Ignore zones beyond the main area */
3022 if (zone_segno >= MAIN_SEGS(sbi))
/* Translate the hardware write pointer into segment/offset form */
3025 wp_block = dev->start_blkaddr
3026 + (blk_zone_wp_sector(blkz) >> log_sectors_per_block);
3027 wp_segno = GET_SEGNO(sbi, wp_block);
3028 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
3030 /* if a curseg points to the zone, skip the check */
3031 for (cs_index = 0; cs_index < NO_CHECK_TYPE; cs_index++) {
3032 cs = &SM_I(sbi)->curseg_array[cs_index];
3033 if (zone_segno <= cs->segno &&
3034 cs->segno < zone_segno + segs_per_zone)
/* Highest valid block offset within the zone, or negative if empty */
3038 last_valid_blkoff = last_vblk_off_in_zone(sbi, zone_segno);
3041 * When there is no valid block in the zone, check write pointer is
3042 * at zone start. If not, reset the write pointer.
3044 if (last_valid_blkoff < 0 &&
3045 blk_zone_wp_sector(blkz) != blk_zone_sector(blkz)) {
3047 MSG(0, "Inconsistent write pointer: wp[0x%x,0x%x]\n",
3048 wp_segno, wp_blkoff);
3049 fsck->chk.wp_inconsistent_zones++;
/* NOTE(review): the fix is presumably gated on c.fix_on — confirm */
3053 FIX_MSG("Reset write pointer of zone at segment 0x%x",
3055 ret = f2fs_reset_zone(wpd->dev_index, blkz);
3057 printf("[FSCK] Write pointer reset failed: %s\n",
3061 fsck->chk.wp_fixed = 1;
3066 * If valid blocks exist in the zone beyond the write pointer, it
3067 * is a bug. No need to fix because the zone is not selected for the
3068 * write. Just report it.
3070 if (last_valid_blkoff + zone_block > wp_block) {
3071 MSG(0, "Unexpected invalid write pointer: wp[0x%x,0x%x]\n",
3072 wp_segno, wp_blkoff);
/*
 * Walk every zoned (HM) device and run chk_and_fix_wp_with_sit() on each
 * zone via f2fs_report_zones(). No-op unless the overall zoned model is
 * host-managed.
 */
3079 static void fix_wp_sit_alignment(struct f2fs_sb_info *sbi)
3082 struct write_pointer_check_data wpd = { sbi, 0 };
3084 if (c.zoned_model != F2FS_ZONED_HM)
3087 for (i = 0; i < MAX_DEVICES; i++) {
3088 if (!c.devices[i].path)
/* Only host-managed zoned devices carry write pointers to check */
3090 if (c.devices[i].zoned_model != F2FS_ZONED_HM)
3094 if (f2fs_report_zones(i, chk_and_fix_wp_with_sit, &wpd)) {
3095 printf("[FSCK] Write pointer check failed: %s\n",
/* Presumably the !HAVE_LINUX_BLKZONED_H stub (no-op) — confirm */
3104 static void fix_wp_sit_alignment(struct f2fs_sb_info *UNUSED(sbi))
3112 * Check and fix consistency with write pointers at the beginning of
3113 * fsck so that following writes by fsck do not fail.
3115 void fsck_chk_and_fix_write_pointers(struct f2fs_sb_info *sbi)
3117 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
/* Only host-managed zoned setups need write-pointer alignment */
3119 if (c.zoned_model != F2FS_ZONED_HM)
/* Repair curseg offsets first (when fixing is enabled), then zones */
3122 if (check_curseg_offsets(sbi) && c.fix_on) {
3123 fix_curseg_info(sbi);
3124 fsck->chk.wp_fixed = 1;
3127 fix_wp_sit_alignment(sbi);
/*
 * Validate every current segment (curseg): its SIT segment type must
 * match the curseg slot, and its SSA summary footer must be of the
 * data/node kind matching that slot. Mismatches are repaired in memory
 * when fix or preen mode is on.
 */
3130 int fsck_chk_curseg_info(struct f2fs_sb_info *sbi)
3132 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3133 struct curseg_info *curseg;
3134 struct seg_entry *se;
3135 struct f2fs_summary_block *sum_blk;
3138 for (i = 0; i < NO_CHECK_TYPE; i++) {
3139 curseg = CURSEG_I(sbi, i);
3140 se = get_seg_entry(sbi, curseg->segno);
3141 sum_blk = curseg->sum_blk;
/* RO images only use the hot data/node logs; skip the others */
3143 if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
3144 (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE))
/* SIT type must agree with the curseg slot index */
3147 if (se->type != i) {
3148 ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
3149 "type(SIT) [%d]", i, curseg->segno,
3151 if (c.fix_on || c.preen_mode)
/* Data cursegs need a data summary, node cursegs a node summary */
3155 if (i <= CURSEG_COLD_DATA && IS_SUM_DATA_SEG(sum_blk->footer)) {
3157 } else if (i > CURSEG_COLD_DATA && IS_SUM_NODE_SEG(sum_blk->footer)) {
3160 ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
3161 "type(SSA) [%d]", i, curseg->segno,
3162 sum_blk->footer.entry_type);
3163 if (c.fix_on || c.preen_mode)
3164 sum_blk->footer.entry_type =
3165 i <= CURSEG_COLD_DATA ?
3166 SUM_TYPE_DATA : SUM_TYPE_NODE;
/*
 * Final verification pass: cross-check everything fsck accumulated
 * (bitmaps, counters, hard links, cursegs) against the checkpoint, print
 * a pass/fail report, optionally reconnect or dump unreachable files,
 * and rewrite global metadata when fixing is enabled.
 */
3174 int fsck_verify(struct f2fs_sb_info *sbi)
3179 u32 nr_unref_nid = 0;
3180 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3181 struct hard_link_node *node = NULL;
3182 bool verify_failed = false;
3183 uint64_t max_blks, data_secs, node_secs, free_blks;
3185 if (c.show_file_map)
/* Zoned devices: report write-pointer consistency gathered earlier */
3190 if (c.zoned_model == F2FS_ZONED_HM) {
3191 printf("[FSCK] Write pointers consistency ");
3192 if (fsck->chk.wp_inconsistent_zones == 0x0) {
3193 printf(" [Ok..]\n");
3195 printf(" [Fail] [0x%x]\n",
3196 fsck->chk.wp_inconsistent_zones);
3197 verify_failed = true;
3200 if (fsck->chk.wp_fixed && c.fix_on)
/* With the lost+found feature, try reconnecting unreachable inodes */
3204 if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
3205 for (i = 0; i < fsck->nr_nat_entries; i++)
3206 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
3208 if (i < fsck->nr_nat_entries) {
3209 i = fsck_reconnect_file(sbi);
3210 printf("[FSCK] Reconnect %u files to lost+found\n", i);
/* Report every nid that is still unreachable after reconnection */
3214 for (i = 0; i < fsck->nr_nat_entries; i++) {
3215 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0) {
3216 struct node_info ni;
3218 get_node_info(sbi, i, &ni);
3219 printf("NID[0x%x] is unreachable, blkaddr:0x%x\n",
/* Report inodes whose on-disk link count exceeds what fsck found */
3225 if (fsck->hard_link_list_head != NULL) {
3226 node = fsck->hard_link_list_head;
3228 printf("NID[0x%x] has [0x%x] more unreachable links\n",
3229 node->nid, node->links);
/*
 * NOTE(review): data_secs is derived from the node count and node_secs
 * from the data-block count — the names look swapped relative to their
 * initializers. They are only ever summed below, so the printed totals
 * are unaffected; confirm against upstream before relying on the names.
 */
3235 data_secs = round_up(sbi->total_valid_node_count, BLKS_PER_SEC(sbi));
3236 node_secs = round_up(sbi->total_valid_block_count -
3237 sbi->total_valid_node_count, BLKS_PER_SEC(sbi));
3238 free_blks = (sbi->total_sections - data_secs - node_secs) *
3240 max_blks = SM_I(sbi)->main_blkaddr + (data_secs + node_secs) *
/* >> 8 converts a 4KB-block count to MB (256 blocks per MB) */
3242 printf("[FSCK] Max image size: %"PRIu64" MB, Free space: %"PRIu64" MB\n",
3243 max_blks >> 8, free_blks >> 8);
/* Consistency checks: each prints Ok/Fail and may flag verify_failed */
3244 printf("[FSCK] Unreachable nat entries ");
3245 if (nr_unref_nid == 0x0) {
3246 printf(" [Ok..] [0x%x]\n", nr_unref_nid);
3248 printf(" [Fail] [0x%x]\n", nr_unref_nid);
3249 verify_failed = true;
3252 printf("[FSCK] SIT valid block bitmap checking ");
3253 if (memcmp(fsck->sit_area_bitmap, fsck->main_area_bitmap,
3254 fsck->sit_area_bitmap_sz) == 0x0) {
3258 verify_failed = true;
3261 printf("[FSCK] Hard link checking for regular file ");
3262 if (fsck->hard_link_list_head == NULL) {
3263 printf(" [Ok..] [0x%x]\n", fsck->chk.multi_hard_link_files);
3265 printf(" [Fail] [0x%x]\n", fsck->chk.multi_hard_link_files);
3266 verify_failed = true;
3269 printf("[FSCK] valid_block_count matching with CP ");
3270 if (sbi->total_valid_block_count == fsck->chk.valid_blk_cnt) {
3271 printf(" [Ok..] [0x%x]\n", (u32)fsck->chk.valid_blk_cnt);
3273 printf(" [Fail] [0x%x]\n", (u32)fsck->chk.valid_blk_cnt);
3274 verify_failed = true;
3277 printf("[FSCK] valid_node_count matching with CP (de lookup) ");
3278 if (sbi->total_valid_node_count == fsck->chk.valid_node_cnt) {
3279 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_node_cnt);
3281 printf(" [Fail] [0x%x]\n", fsck->chk.valid_node_cnt);
3282 verify_failed = true;
3285 printf("[FSCK] valid_node_count matching with CP (nat lookup)");
3286 if (sbi->total_valid_node_count == fsck->chk.valid_nat_entry_cnt) {
3287 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_nat_entry_cnt);
3289 printf(" [Fail] [0x%x]\n", fsck->chk.valid_nat_entry_cnt);
3290 verify_failed = true;
3293 printf("[FSCK] valid_inode_count matched with CP ");
3294 if (sbi->total_valid_inode_count == fsck->chk.valid_inode_cnt) {
3295 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_inode_cnt);
3297 printf(" [Fail] [0x%x]\n", fsck->chk.valid_inode_cnt);
3298 verify_failed = true;
3301 printf("[FSCK] free segment_count matched with CP ");
3302 if (le32_to_cpu(F2FS_CKPT(sbi)->free_segment_count) ==
3303 fsck->chk.sit_free_segs) {
3304 printf(" [Ok..] [0x%x]\n", fsck->chk.sit_free_segs);
3306 printf(" [Fail] [0x%x]\n", fsck->chk.sit_free_segs);
3307 verify_failed = true;
3310 printf("[FSCK] next block offset is free ");
3311 if (check_curseg_offsets(sbi) == 0) {
3312 printf(" [Ok..]\n");
3314 printf(" [Fail]\n");
3315 verify_failed = true;
3318 printf("[FSCK] fixing SIT types\n");
3319 if (check_sit_types(sbi) != 0)
3322 printf("[FSCK] other corrupted bugs ");
3323 if (c.bug_on == 0) {
3324 printf(" [Ok..]\n");
3326 printf(" [Fail]\n");
3327 ret = EXIT_ERR_CODE;
3330 if (verify_failed) {
3331 ret = EXIT_ERR_CODE;
/* Interactive recovery: optionally dump unreachable nodes to disk */
3335 #ifndef WITH_ANDROID
3336 if (nr_unref_nid && !c.ro) {
3337 char ans[255] = {0};
3340 printf("\nDo you want to restore lost files into ./lost_found/? [Y/N] ");
3341 res = scanf("%s", ans);
3343 if (!strcasecmp(ans, "y")) {
3344 for (i = 0; i < fsck->nr_nat_entries; i++) {
3345 if (f2fs_test_bit(i, fsck->nat_area_bitmap))
3346 dump_node(sbi, i, 1);
3352 /* fix global metadata */
3353 if (force || (c.fix_on && f2fs_dev_is_writable())) {
3354 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
3355 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3357 if (force || c.bug_on || c.bug_nat_bits || c.quota_fixed) {
3358 /* flush nats to write_nit_bits below */
3359 flush_journal_entries(sbi);
3360 fix_hard_links(sbi);
3361 fix_nat_entries(sbi);
3362 rewrite_sit_area_bitmap(sbi);
3363 fix_wp_sit_alignment(sbi);
3364 fix_curseg_info(sbi);
3366 fix_checkpoints(sbi);
3367 } else if (is_set_ckpt_flags(cp, CP_FSCK_FLAG) ||
3368 is_set_ckpt_flags(cp, CP_QUOTA_NEED_FSCK_FLAG)) {
3369 write_checkpoints(sbi);
/* Clear recorded stop reasons / fs errors in the superblock */
3372 if (c.abnormal_stop)
3373 memset(sb->s_stop_reason, 0, MAX_STOP_REASON);
3376 memset(sb->s_errors, 0, MAX_F2FS_ERRORS);
3378 if (c.abnormal_stop || c.fs_errors)
3379 update_superblock(sb, SB_MASK_ALL);
3381 /* to return FSCK_ERROR_CORRECTED */
3387 void fsck_free(struct f2fs_sb_info *sbi)
3389 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3392 quota_release_context(&fsck->qctx);
3394 if (fsck->main_area_bitmap)
3395 free(fsck->main_area_bitmap);
3397 if (fsck->nat_area_bitmap)
3398 free(fsck->nat_area_bitmap);
3400 if (fsck->sit_area_bitmap)
3401 free(fsck->sit_area_bitmap);
3404 free(fsck->entries);
3409 while (fsck->dentry) {
3410 struct f2fs_dentry *dentry = fsck->dentry;
3412 fsck->dentry = fsck->dentry->next;