 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
#ifdef HAVE_LINUX_POSIX_ACL_H
#include <linux/posix_acl.h>
#endif
#ifndef ACL_UNDEFINED_TAG
#define ACL_UNDEFINED_TAG	(0x00)
#define ACL_USER_OBJ		(0x01)
#define ACL_USER		(0x02)
#define ACL_GROUP_OBJ		(0x04)
#define ACL_GROUP		(0x08)
#define ACL_MASK		(0x10)
#define ACL_OTHER		(0x20)
#endif
#ifdef HAVE_LINUX_BLKZONED_H

static int get_device_idx(struct f2fs_sb_info *sbi, uint32_t segno)
{
	block_t seg_start_blkaddr;
	int i;

	seg_start_blkaddr = SM_I(sbi)->main_blkaddr +
				segno * DEFAULT_BLOCKS_PER_SEGMENT;
	for (i = 0; i < c.ndevs; i++)
		if (c.devices[i].start_blkaddr <= seg_start_blkaddr &&
			c.devices[i].end_blkaddr > seg_start_blkaddr)
			return i;
	return 0;
}
static int get_zone_idx_from_dev(struct f2fs_sb_info *sbi,
					uint32_t segno, uint32_t dev_idx)
{
	block_t seg_start_blkaddr = START_BLOCK(sbi, segno);

	return (seg_start_blkaddr - c.devices[dev_idx].start_blkaddr) >>
			log_base_2(sbi->segs_per_sec * sbi->blocks_per_seg);
}
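
/*
 * Worked example for the conversion above (hypothetical geometry, not
 * taken from a real image): with 2MB segments (blocks_per_seg = 512)
 * and segs_per_sec = 128, one zone covers 512 * 128 = 65536 blocks, so
 * a segment starting 131072 blocks past the device base lands in zone
 * 131072 >> 16 = 2.
 */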
bool is_usable_seg(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int secno = segno / sbi->segs_per_sec;
	block_t seg_start = START_BLOCK(sbi, segno);
	block_t blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int dev_idx = get_device_idx(sbi, segno);
	unsigned int zone_idx = get_zone_idx_from_dev(sbi, segno, dev_idx);
	unsigned int sec_off = SM_I(sbi)->main_blkaddr >>
						log_base_2(blocks_per_sec);

	if (zone_idx < c.devices[dev_idx].nr_rnd_zones)
		return true;

	if (c.devices[dev_idx].zoned_model != F2FS_ZONED_HM)
		return true;

	return seg_start < ((sec_off + secno) * blocks_per_sec) +
				c.devices[dev_idx].zone_cap_blocks[zone_idx];
}
unsigned int get_usable_seg_count(struct f2fs_sb_info *sbi)
{
	unsigned int i, usable_seg_count = 0;

	for (i = 0; i < MAIN_SEGS(sbi); i++)
		if (is_usable_seg(sbi, i))
			usable_seg_count++;

	return usable_seg_count;
}
#else

bool is_usable_seg(struct f2fs_sb_info *UNUSED(sbi), unsigned int UNUSED(segno))
{
	return true;
}

unsigned int get_usable_seg_count(struct f2fs_sb_info *sbi)
{
	return MAIN_SEGS(sbi);
}

#endif
u32 get_free_segments(struct f2fs_sb_info *sbi)
{
	u32 i, free_segs = 0;

	for (i = 0; i < MAIN_SEGS(sbi); i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if (se->valid_blocks == 0x0 && !IS_CUR_SEGNO(sbi, i) &&
				is_usable_seg(sbi, i))
			free_segs++;
	}
	return free_segs;
}
void update_free_segments(struct f2fs_sb_info *sbi)
{
	char *progress = "-*|*-";
	static int i = 0;

	if (c.dbg_lv)
		return;

	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
	fflush(stdout);
	i++;
}
#if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
static void print_acl(const u8 *value, int size)
{
	const struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
	const struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
	const u8 *end = value + size;
	int i, count;

	if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION)) {
		MSG(0, "Invalid ACL version [0x%x : 0x%x]\n",
				le32_to_cpu(hdr->a_version), F2FS_ACL_VERSION);
		return;
	}

	count = f2fs_acl_count(size);
	if (count <= 0) {
		MSG(0, "Invalid ACL value size %d\n", size);
		return;
	}

	for (i = 0; i < count; i++) {
		if ((u8 *)entry > end) {
			MSG(0, "Invalid ACL entries count %d\n", count);
			return;
		}

		switch (le16_to_cpu(entry->e_tag)) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			MSG(0, "tag:0x%x perm:0x%x\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry_short));
			break;
		case ACL_USER:
			MSG(0, "tag:0x%x perm:0x%x uid:%u\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm),
					le32_to_cpu(entry->e_id));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		case ACL_GROUP:
			MSG(0, "tag:0x%x perm:0x%x gid:%u\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm),
					le32_to_cpu(entry->e_id));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		default:
			MSG(0, "Unknown ACL tag 0x%x\n",
					le16_to_cpu(entry->e_tag));
			return;
		}
	}
}
#endif /* HAVE_LINUX_POSIX_ACL_H || HAVE_SYS_ACL_H */
static void print_xattr_entry(const struct f2fs_xattr_entry *ent)
{
	const u8 *value = (const u8 *)&ent->e_name[ent->e_name_len];
	const int size = le16_to_cpu(ent->e_value_size);
	const struct fscrypt_context *ctx;
	int i;

	MSG(0, "\nxattr: e_name_index:%d e_name:", ent->e_name_index);
	for (i = 0; i < ent->e_name_len; i++)
		MSG(0, "%c", ent->e_name[i]);
	MSG(0, " e_name_len:%d e_value_size:%d e_value:\n",
			ent->e_name_len, size);

	switch (ent->e_name_index) {
#if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
	case F2FS_XATTR_INDEX_POSIX_ACL_ACCESS:
	case F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT:
		print_acl(value, size);
		return;
#endif
	case F2FS_XATTR_INDEX_ENCRYPTION:
		ctx = (const struct fscrypt_context *)value;
		if (size != sizeof(*ctx) ||
				ctx->format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
			break;
		MSG(0, "format: %d\n", ctx->format);
		MSG(0, "contents_encryption_mode: 0x%x\n", ctx->contents_encryption_mode);
		MSG(0, "filenames_encryption_mode: 0x%x\n", ctx->filenames_encryption_mode);
		MSG(0, "flags: 0x%x\n", ctx->flags);
		MSG(0, "master_key_descriptor: ");
		for (i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
			MSG(0, "%02X", ctx->master_key_descriptor[i]);
		MSG(0, "\nnonce: ");
		for (i = 0; i < FS_KEY_DERIVATION_NONCE_SIZE; i++)
			MSG(0, "%02X", ctx->nonce[i]);
		MSG(0, "\n");
		return;
	}

	for (i = 0; i < size; i++)
		MSG(0, "%02X", value[i]);
	MSG(0, "\n");
}
void print_inode_info(struct f2fs_sb_info *sbi,
			struct f2fs_node *node, int name)
{
	struct f2fs_inode *inode = &node->i;
	void *xattr_addr;
	struct f2fs_xattr_entry *ent;
	char en[F2FS_PRINT_NAMELEN];
	unsigned int i = 0;
	u32 namelen = le32_to_cpu(inode->i_namelen);
	int enc_name = file_enc_name(inode);
	int ofs = get_extra_isize(node);

	pretty_print_filename(inode->i_name, namelen, en, enc_name);
	if (name && en[0]) {
		MSG(0, " - File name : %s%s\n", en,
				enc_name ? " <encrypted>" : "");
		setlocale(LC_ALL, "");
		MSG(0, " - File size : %'" PRIu64 " (bytes)\n",
				le64_to_cpu(inode->i_size));
		return;
	}

	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_advise);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_inline);
	DISP_u32(inode, i_pino);
	DISP_u32(inode, i_dir_level);

	if (en[0]) {
		DISP_u32(inode, i_namelen);
		printf("%-30s\t\t[%s]\n", "i_name", en);
	}

	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			le32_to_cpu(inode->i_ext.fofs),
			le32_to_cpu(inode->i_ext.blk_addr),
			le32_to_cpu(inode->i_ext.len));

	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
		DISP_u16(inode, i_extra_isize);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
			DISP_u16(inode, i_inline_xattr_size);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
			DISP_u32(inode, i_projid);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
			DISP_u32(inode, i_inode_checksum);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
			DISP_u64(inode, i_crtime);
			DISP_u32(inode, i_crtime_nsec);
		}
		if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
			DISP_u64(inode, i_compr_blocks);
			DISP_u32(inode, i_compress_algrithm);
			DISP_u32(inode, i_log_cluster_size);
			DISP_u32(inode, i_padding);
		}
	}

	for (i = 0; i < ADDRS_PER_INODE(inode); i++) {
		block_t blkaddr;
		char *flag = "";

		if (i + ofs >= DEF_ADDRS_PER_INODE)
			break;

		blkaddr = le32_to_cpu(inode->i_addr[i + ofs]);
		if (blkaddr == 0x0)
			continue;
		if (blkaddr == COMPRESS_ADDR)
			flag = "cluster flag";
		else if (blkaddr == NEW_ADDR)
			flag = "reserved flag";
		printf("i_addr[0x%x] %-16s\t\t[0x%8x : %u]\n", i + ofs, flag,
				blkaddr, blkaddr);
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	xattr_addr = read_all_xattrs(sbi, node);
	if (xattr_addr) {
		list_for_each_xattr(ent, xattr_addr) {
			print_xattr_entry(ent);
		}
		free(xattr_addr);
	}

	MSG(0, "\n");
}
void print_node_info(struct f2fs_sb_info *sbi,
			struct f2fs_node *node_block, int verbose)
{
	nid_t ino = le32_to_cpu(node_block->footer.ino);
	nid_t nid = le32_to_cpu(node_block->footer.nid);

	if (ino == nid) {
		DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
		print_inode_info(sbi, node_block, verbose);
	} else {
		int i;
		u32 *dump_blk = (u32 *)node_block;

		DBG(verbose,
			"Node ID [0x%x:%u] is direct node or indirect node.\n",
			nid, nid);
		for (i = 0; i < DEF_ADDRS_PER_BLOCK; i++)
			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
					i, dump_blk[i], dump_blk[i]);
	}
}
static void DISP_label(uint16_t *name)
{
	char buffer[MAX_VOLUME_NAME];

	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
	if (c.layout)
		printf("%-30s %s\n", "Filesystem volume name:", buffer);
	else
		printf("%-30s" "\t\t[%s]\n", "volume_name", buffer);
}
void print_raw_sb_info(struct f2fs_super_block *sb)
{
	if (c.layout)
		goto printout;
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");
printout:
	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);

	DISP_label(sb->volume_name);

	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);

	DISP("%-.252s", sb, version);

	printf("\n");
}
void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (c.layout)
		goto printout;
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");
printout:
	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);

	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}
void print_cp_state(u32 flag)
{
	MSG(0, "Info: checkpoint state = %x : ", flag);
	if (flag & CP_QUOTA_NEED_FSCK_FLAG)
		MSG(0, "%s", " quota_need_fsck");
	if (flag & CP_LARGE_NAT_BITMAP_FLAG)
		MSG(0, "%s", " large_nat_bitmap");
	if (flag & CP_NOCRC_RECOVERY_FLAG)
		MSG(0, "%s", " allow_nocrc");
	if (flag & CP_TRIMMED_FLAG)
		MSG(0, "%s", " trimmed");
	if (flag & CP_NAT_BITS_FLAG)
		MSG(0, "%s", " nat_bits");
	if (flag & CP_CRC_RECOVERY_FLAG)
		MSG(0, "%s", " crc");
	if (flag & CP_FASTBOOT_FLAG)
		MSG(0, "%s", " fastboot");
	if (flag & CP_FSCK_FLAG)
		MSG(0, "%s", " fsck");
	if (flag & CP_ERROR_FLAG)
		MSG(0, "%s", " error");
	if (flag & CP_COMPACT_SUM_FLAG)
		MSG(0, "%s", " compacted_summary");
	if (flag & CP_ORPHAN_PRESENT_FLAG)
		MSG(0, "%s", " orphan_inodes");
	if (flag & CP_DISABLED_FLAG)
		MSG(0, "%s", " disabled");
	if (flag & CP_RESIZEFS_FLAG)
		MSG(0, "%s", " resizefs");
	if (flag & CP_UMOUNT_FLAG)
		MSG(0, "%s", " unmount");
	else
		MSG(0, "%s", " sudden-power-off");
	MSG(0, "\n");
}
void print_sb_state(struct f2fs_super_block *sb)
{
	__le32 f = sb->feature;
	int i;

	MSG(0, "Info: superblock features = %x : ", le32_to_cpu(f));
	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
		MSG(0, "%s", " encrypt");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_VERITY)) {
		MSG(0, "%s", " verity");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
		MSG(0, "%s", " blkzoned");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
		MSG(0, "%s", " extra_attr");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
		MSG(0, "%s", " project_quota");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
		MSG(0, "%s", " inode_checksum");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
		MSG(0, "%s", " flexible_inline_xattr");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
		MSG(0, "%s", " quota_ino");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
		MSG(0, "%s", " inode_crtime");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
		MSG(0, "%s", " lost_found");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_SB_CHKSUM)) {
		MSG(0, "%s", " sb_checksum");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
		MSG(0, "%s", " casefold");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
		MSG(0, "%s", " compression");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_RO)) {
		MSG(0, "%s", " ro");
	}
	MSG(0, "\n");
	MSG(0, "Info: superblock encrypt level = %d, salt = ",
			sb->encryption_level);
	for (i = 0; i < 16; i++)
		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
	MSG(0, "\n");
}
static char *stop_reason_str[] = {
	[STOP_CP_REASON_SHUTDOWN] = "shutdown",
	[STOP_CP_REASON_FAULT_INJECT] = "fault_inject",
	[STOP_CP_REASON_META_PAGE] = "meta_page",
	[STOP_CP_REASON_WRITE_FAIL] = "write_fail",
	[STOP_CP_REASON_CORRUPTED_SUMMARY] = "corrupted_summary",
	[STOP_CP_REASON_UPDATE_INODE] = "update_inode",
	[STOP_CP_REASON_FLUSH_FAIL] = "flush_fail",
};
void print_sb_stop_reason(struct f2fs_super_block *sb)
{
	u8 *reason = sb->s_stop_reason;
	int i;

	if (!c.force_stop)
		return;

	MSG(0, "Info: checkpoint stop reason: ");

	for (i = 0; i < STOP_CP_REASON_MAX; i++) {
		if (reason[i])
			MSG(0, "%s(%d) ", stop_reason_str[i], reason[i]);
	}

	MSG(0, "\n");
}
static char *errors_str[] = {
	[ERROR_CORRUPTED_CLUSTER] = "corrupted_cluster",
	[ERROR_FAIL_DECOMPRESSION] = "fail_decompression",
	[ERROR_INVALID_BLKADDR] = "invalid_blkaddr",
	[ERROR_CORRUPTED_DIRENT] = "corrupted_dirent",
	[ERROR_CORRUPTED_INODE] = "corrupted_inode",
	[ERROR_INCONSISTENT_SUMMARY] = "inconsistent_summary",
	[ERROR_INCONSISTENT_FOOTER] = "inconsistent_footer",
	[ERROR_INCONSISTENT_SUM_TYPE] = "inconsistent_sum_type",
	[ERROR_CORRUPTED_JOURNAL] = "corrupted_journal",
	[ERROR_INCONSISTENT_NODE_COUNT] = "inconsistent_node_count",
	[ERROR_INCONSISTENT_BLOCK_COUNT] = "inconsistent_block_count",
	[ERROR_INVALID_CURSEG] = "invalid_curseg",
	[ERROR_INCONSISTENT_SIT] = "inconsistent_sit",
	[ERROR_CORRUPTED_VERITY_XATTR] = "corrupted_verity_xattr",
	[ERROR_CORRUPTED_XATTR] = "corrupted_xattr",
};
void print_sb_errors(struct f2fs_super_block *sb)
{
	u8 *errors = sb->s_errors;
	int i;

	if (!c.fs_errors)
		return;

	MSG(0, "Info: fs errors: ");

	for (i = 0; i < ERROR_MAX; i++) {
		if (test_bit_le(i, errors))
			MSG(0, "%s ", errors_str[i]);
	}

	MSG(0, "\n");
}
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
	case META_SIT:
		if (blkaddr >= SIT_BLK_CNT(sbi))
			return false;
		break;
	case META_SSA:
		if (blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr)
			return false;
		break;
	case META_CP:
		if (blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi))
			return false;
		break;
	case META_POR:
		if (blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi))
			return false;
		break;
	default:
		ASSERT(0);
	}

	return true;
}
static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int segno);

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type)
{
	block_t blkno = start;
	block_t blkaddr, start_blk = 0, len = 0;

	for (; nrpages-- > 0; blkno++) {
		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (blkno >= NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))
				blkno = 0;
			/* get nat block addr */
			blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK, NULL);
			break;
		case META_SIT:
			/* get sit block addr */
			blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			blkaddr = blkno;
			break;
		default:
			ASSERT(0);
		}

		if (!len) {
			start_blk = blkaddr;
			len = 1;
		} else if (start_blk + len == blkaddr) {
			len++;
		} else {
			dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
						len << F2FS_BLKSIZE_BITS);
			start_blk = blkaddr;
			len = 1;
		}
	}
out:
	dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
				len << F2FS_BLKSIZE_BITS);
	return blkno - start;
}
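
/*
 * Note on the loop above: physically contiguous metadata blocks are
 * batched into a single dev_readahead() call. A run grows while each
 * new block address equals start_blk + len, and is flushed whenever
 * contiguity breaks, plus once more after the loop for the final run.
 */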
void update_superblock(struct f2fs_super_block *sb, int sb_mask)
{
	int addr, ret;
	uint8_t *buf;
	u32 old_crc, new_crc;

	buf = calloc(BLOCK_SZ, 1);
	ASSERT(buf);

	if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
		old_crc = get_sb(crc);
		new_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
						SB_CHKSUM_OFFSET);
		set_sb(crc, new_crc);
		MSG(1, "Info: SB CRC is updated (0x%x -> 0x%x)\n",
							old_crc, new_crc);
	}

	memcpy(buf + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
	for (addr = SB0_ADDR; addr < SB_MAX_ADDR; addr++) {
		if (SB_MASK(addr) & sb_mask) {
			ret = dev_write_block(buf, addr);
			ASSERT(ret >= 0);
		}
	}

	free(buf);
	DBG(0, "Info: Done updating the superblock\n");
}
static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
							enum SB_ADDR sb_addr)
{
	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
	u32 cp_blkaddr = get_sb(cp_blkaddr);
	u32 sit_blkaddr = get_sb(sit_blkaddr);
	u32 nat_blkaddr = get_sb(nat_blkaddr);
	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
	u32 main_blkaddr = get_sb(main_blkaddr);
	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
	u32 segment_count_sit = get_sb(segment_count_sit);
	u32 segment_count_nat = get_sb(segment_count_nat);
	u32 segment_count_ssa = get_sb(segment_count_ssa);
	u32 segment_count_main = get_sb(segment_count_main);
	u32 segment_count = get_sb(segment_count);
	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
			(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
			(segment_count << log_blocks_per_seg);
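
	/*
	 * Expected on-disk layout, each area a whole number of segments:
	 *
	 *   segment0 = CP | SIT | NAT | SSA | MAIN
	 *
	 * Every check below verifies that one area's start address plus
	 * its size (segment count << log_blocks_per_seg) equals the start
	 * of the next area.
	 */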
	if (segment0_blkaddr != cp_blkaddr) {
		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
				segment0_blkaddr, cp_blkaddr);
		return -1;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return -1;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return -1;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return -1;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return -1;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return -1;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		set_sb(segment_count, (main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		update_superblock(sb, SB_MASK(sb_addr));
		MSG(0, "Info: Fix alignment: start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	}

	return 0;
}
static int verify_sb_chksum(struct f2fs_super_block *sb)
{
	if (SB_CHKSUM_OFFSET != get_sb(checksum_offset)) {
		MSG(0, "\tInvalid SB CRC offset: %u\n",
					get_sb(checksum_offset));
		return -1;
	}
	if (f2fs_crc_valid(get_sb(crc), sb,
					get_sb(checksum_offset))) {
		MSG(0, "\tInvalid SB CRC: 0x%x\n", get_sb(crc));
		return -1;
	}
	return 0;
}
int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
{
	unsigned int blocksize;
	unsigned int segment_count, segs_per_sec, secs_per_zone, segs_per_zone;
	unsigned int total_sections, blocks_per_seg;

	if (F2FS_SUPER_MAGIC != get_sb(magic)) {
		MSG(0, "Magic Mismatch, valid(0x%x) - read(0x%x)\n",
			F2FS_SUPER_MAGIC, get_sb(magic));
		return -1;
	}

	if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) &&
					verify_sb_chksum(sb))
		return -1;

	blocksize = 1 << get_sb(log_blocksize);
	if (F2FS_BLKSIZE != blocksize) {
		MSG(0, "Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return -1;
	}

	/* check log blocks per segment */
	if (get_sb(log_blocks_per_seg) != 9) {
		MSG(0, "Invalid log blocks per segment (%u)\n",
			get_sb(log_blocks_per_seg));
		return -1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE) {
		MSG(0, "Invalid log sectorsize (%u)\n", get_sb(log_sectorsize));
		return -1;
	}

	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
						F2FS_MAX_LOG_SECTOR_SIZE) {
		MSG(0, "Invalid log sectors per block(%u) log sectorsize(%u)\n",
			get_sb(log_sectors_per_block),
			get_sb(log_sectorsize));
		return -1;
	}

	segment_count = get_sb(segment_count);
	segs_per_sec = get_sb(segs_per_sec);
	secs_per_zone = get_sb(secs_per_zone);
	total_sections = get_sb(section_count);
	segs_per_zone = segs_per_sec * secs_per_zone;

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << get_sb(log_blocks_per_seg);
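
	/*
	 * Worked example: log_blocks_per_seg == 9 (enforced above) gives
	 * blocks_per_seg = 1 << 9 = 512, i.e. a 2MB segment made of the
	 * 4KB blocks enforced by the blocksize check.
	 */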
	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		MSG(0, "\tInvalid segment count (%u)\n", segment_count);
		return -1;
	}

	if (!(get_sb(feature) & F2FS_FEATURE_RO) &&
			(total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec)) {
		MSG(0, "\tInvalid segment/section count (%u, %u x %u)\n",
			segment_count, total_sections, segs_per_sec);
		return -1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		MSG(0, "Small segment_count (%u < %u * %u)\n",
			segment_count, segs_per_sec, total_sections);
		return -1;
	}

	if (segment_count > (get_sb(block_count) >> 9)) {
		MSG(0, "Wrong segment_count / block_count (%u > %llu)\n",
			segment_count, get_sb(block_count));
		return -1;
	}

	if (sb->devs[0].path[0]) {
		unsigned int dev_segs = le32_to_cpu(sb->devs[0].total_segments);
		int i = 1;

		while (i < MAX_DEVICES && sb->devs[i].path[0]) {
			dev_segs += le32_to_cpu(sb->devs[i].total_segments);
			i++;
		}
		if (segment_count != dev_segs / segs_per_zone * segs_per_zone) {
			MSG(0, "Segment count (%u) mismatch with total segments from devices (%u)\n",
				segment_count, dev_segs);
			return -1;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		MSG(0, "Wrong secs_per_zone / total_sections (%u, %u)\n",
			secs_per_zone, total_sections);
		return -1;
	}
	if (get_sb(extension_count) > F2FS_MAX_EXTENSION ||
			sb->hot_ext_count > F2FS_MAX_EXTENSION ||
			get_sb(extension_count) +
			sb->hot_ext_count > F2FS_MAX_EXTENSION) {
		MSG(0, "Corrupted extension count (%u + %u > %u)\n",
			get_sb(extension_count),
			sb->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return -1;
	}

	if (get_sb(cp_payload) > (blocks_per_seg - F2FS_CP_PACKS)) {
		MSG(0, "Insane cp_payload (%u > %u)\n",
			get_sb(cp_payload), blocks_per_seg - F2FS_CP_PACKS);
		return -1;
	}

	/* check reserved ino info */
	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
			get_sb(root_ino) != 3) {
		MSG(0, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)\n",
			get_sb(node_ino), get_sb(meta_ino), get_sb(root_ino));
		return -1;
	}

	/* Check zoned block device feature */
	if (c.devices[0].zoned_model != F2FS_ZONED_NONE &&
			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
		MSG(0, "\tMissing zoned block device feature\n");
		return -1;
	}

	if (sanity_check_area_boundary(sb, sb_addr))
		return -1;

	return 0;
}
#define CHECK_PERIOD (3600 * 24 * 30)	/* one month by default */

int validate_super_block(struct f2fs_sb_info *sbi, enum SB_ADDR sb_addr)
{
	char buf[F2FS_BLKSIZE];

	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
	if (!sbi->raw_super)
		return -ENOMEM;

	if (dev_read_block(buf, sb_addr))
		return -1;

	memcpy(sbi->raw_super, buf + F2FS_SUPER_OFFSET,
					sizeof(struct f2fs_super_block));

	if (!sanity_check_raw_super(sbi->raw_super, sb_addr)) {
		/* get kernel version */
		if (c.kd >= 0) {
			dev_read_version(c.version, 0, VERSION_NAME_LEN);
			get_kernel_version(c.version);
		} else {
			get_kernel_uname_version(c.version);
		}

		/* build sb version */
		memcpy(c.sb_version, sbi->raw_super->version, VERSION_NAME_LEN);
		get_kernel_version(c.sb_version);
		memcpy(c.init_version, sbi->raw_super->init_version,
					VERSION_NAME_LEN);
		get_kernel_version(c.init_version);

		c.force_stop = is_checkpoint_stop(sbi->raw_super, false);
		c.abnormal_stop = is_checkpoint_stop(sbi->raw_super, true);
		c.fs_errors = is_inconsistent_error(sbi->raw_super);

		MSG(0, "Info: MKFS version\n \"%s\"\n", c.init_version);
		MSG(0, "Info: FSCK version\n from \"%s\"\n to \"%s\"\n",
					c.sb_version, c.version);
		print_sb_state(sbi->raw_super);
		print_sb_stop_reason(sbi->raw_super);
		print_sb_errors(sbi->raw_super);
		return 0;
	}

	free(sbi->raw_super);
	sbi->raw_super = NULL;
	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", sb_addr);

	return -EINVAL;
}
int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	u64 total_sectors;
	int i;

	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
	sbi->log_blocksize = get_sb(log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = get_sb(segs_per_sec);
	sbi->secs_per_zone = get_sb(secs_per_zone);
	sbi->total_sections = get_sb(section_count);
	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = get_sb(root_ino);
	sbi->node_ino_num = get_sb(node_ino);
	sbi->meta_ino_num = get_sb(meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (!sb->devs[i].path[0])
			break;

		if (i) {
			c.devices[i].path = strdup((char *)sb->devs[i].path);
			if (get_device_info(i))
				ASSERT(0);
		} else {
			ASSERT(!strcmp((char *)sb->devs[i].path,
						(char *)c.devices[i].path));
		}

		c.devices[i].total_segments =
			le32_to_cpu(sb->devs[i].total_segments);
		if (i)
			c.devices[i].start_blkaddr =
				c.devices[i - 1].end_blkaddr + 1;
		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
			c.devices[i].total_segments *
			c.blks_per_seg - 1;
		if (i == 0)
			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);

		if (c.zoned_model == F2FS_ZONED_NONE) {
			if (c.devices[i].zoned_model == F2FS_ZONED_HM)
				c.zoned_model = F2FS_ZONED_HM;
			else if (c.devices[i].zoned_model == F2FS_ZONED_HA &&
					c.zoned_model != F2FS_ZONED_HM)
				c.zoned_model = F2FS_ZONED_HA;
		}

		c.ndevs = i + 1;
		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
				i, c.devices[i].path,
				c.devices[i].start_blkaddr,
				c.devices[i].end_blkaddr);
	}

	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
	MSG(0, "Info: Segments per section = %d\n", sbi->segs_per_sec);
	MSG(0, "Info: Sections per zone = %d\n", sbi->secs_per_zone);
	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
				total_sectors, total_sectors >>
						(20 - get_sb(log_sectorsize)));
	return 0;
}
static int verify_checksum_chksum(struct f2fs_checkpoint *cp)
{
	unsigned int chksum_offset = get_cp(checksum_offset);
	unsigned int crc, cal_crc;

	if (chksum_offset < CP_MIN_CHKSUM_OFFSET ||
			chksum_offset > CP_CHKSUM_OFFSET) {
		MSG(0, "\tInvalid CP CRC offset: %u\n", chksum_offset);
		return -1;
	}

	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + chksum_offset));
	cal_crc = f2fs_checkpoint_chksum(cp);
	if (cal_crc != crc) {
		MSG(0, "\tInvalid CP CRC: offset:%u, crc:0x%x, calc:0x%x\n",
			chksum_offset, crc, cal_crc);
		return -1;
	}
	return 0;
}
static void *get_checkpoint_version(block_t cp_addr)
{
	void *cp_page;

	cp_page = malloc(F2FS_BLKSIZE);
	ASSERT(cp_page);

	if (dev_read_block(cp_page, cp_addr) < 0)
		ASSERT(0);

	if (verify_checksum_chksum((struct f2fs_checkpoint *)cp_page))
		goto out;
	return cp_page;
out:
	free(cp_page);
	return NULL;
}
void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
				unsigned long long *version)
{
	void *cp_page_1, *cp_page_2;
	struct f2fs_checkpoint *cp;
	unsigned long long cur_version = 0, pre_version = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_checkpoint_version(cp_addr);
	if (!cp_page_1)
		return NULL;

	cp = (struct f2fs_checkpoint *)cp_page_1;
	if (get_cp(cp_pack_total_block_count) > sbi->blocks_per_seg)
		goto invalid_cp1;

	pre_version = get_cp(checkpoint_ver);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += get_cp(cp_pack_total_block_count) - 1;
	cp_page_2 = get_checkpoint_version(cp_addr);
	if (!cp_page_2)
		goto invalid_cp1;

	cp = (struct f2fs_checkpoint *)cp_page_2;
	cur_version = get_cp(checkpoint_ver);

	if (cur_version == pre_version) {
		*version = cur_version;
		free(cp_page_2);
		return cp_page_1;
	}

	free(cp_page_2);
invalid_cp1:
	free(cp_page_1);
	return NULL;
}
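
/*
 * A CP pack spans cp_pack_total_block_count blocks; both its first and
 * its last block carry the checkpoint version, and the pack is treated
 * as valid only when the two versions match, which is exactly what the
 * helper above verifies.
 */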
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	void *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0, version;
	unsigned long long cp_start_blk_no;
	unsigned int cp_payload, cp_blks;
	int i, ret;

	cp_payload = get_sb(cp_payload);
	if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
		return -EINVAL;

	cp_blks = 1 + cp_payload;
	sbi->ckpt = malloc(cp_blks * blk_size);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block requires reading both
	 * checkpoint packs (cp pack 1 and cp pack 2).
	 */
	cp_start_blk_no = get_sb(cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version)) {
			cur_page = cp2;
			sbi->cur_cp = 2;
			version = cp2_version;
		} else {
			cur_page = cp1;
			sbi->cur_cp = 1;
			version = cp1_version;
		}
	} else if (cp1) {
		cur_page = cp1;
		sbi->cur_cp = 1;
		version = cp1_version;
	} else if (cp2) {
		cur_page = cp2;
		sbi->cur_cp = 2;
		version = cp2_version;
	} else
		goto fail_no_cp;

	MSG(0, "Info: CKPT version = %llx\n", version);

	memcpy(sbi->ckpt, cur_page, blk_size);

	if (cp_blks > 1) {
		unsigned long long cp_blk_no;

		cp_blk_no = get_sb(cp_blkaddr);
		if (cur_page == cp2)
			cp_blk_no += 1 << get_sb(log_blocks_per_seg);

		/* copy sit bitmap */
		for (i = 1; i < cp_blks; i++) {
			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
			ret = dev_read_block(cur_page, cp_blk_no + i);
			ASSERT(ret >= 0);
			memcpy(ckpt + i * blk_size, cur_page, blk_size);
		}
	}
	free(cp1);
	free(cp2);
	return 0;

fail_no_cp:
	free(cp1);
	free(cp2);
	free(sbi->ckpt);
	sbi->ckpt = NULL;
	return -EINVAL;
}
bool is_checkpoint_stop(struct f2fs_super_block *sb, bool abnormal)
{
	int i;

	for (i = 0; i < STOP_CP_REASON_MAX; i++) {
		if (abnormal && i == STOP_CP_REASON_SHUTDOWN)
			continue;
		if (sb->s_stop_reason[i])
			return true;
	}

	return false;
}
bool is_inconsistent_error(struct f2fs_super_block *sb)
{
	int i;

	for (i = 0; i < MAX_F2FS_ERRORS; i++) {
		if (sb->s_errors[i])
			return true;
	}

	return false;
}
/*
 * For a return value of 1, the caller should further check the c.fix_on
 * state and take appropriate action.
 */
static int f2fs_should_proceed(struct f2fs_super_block *sb, u32 flag)
{
	if (!c.fix_on && (c.auto_fix || c.preen_mode)) {
		if (flag & CP_FSCK_FLAG ||
			flag & CP_QUOTA_NEED_FSCK_FLAG ||
			c.abnormal_stop || c.fs_errors ||
			(exist_qf_ino(sb) && (flag & CP_ERROR_FLAG))) {
			c.fix_on = 1;
		} else if (!c.preen_mode) {
			print_cp_state(flag);
			return 0;
		}
	}
	return 1;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned int flag = get_cp(ckpt_flags);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count;
	int i;

	total = get_sb(segment_count);
	fsmeta = get_sb(segment_count_ckpt);
	sit_segs = get_sb(segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = get_sb(segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += get_cp(rsvd_segment_count);
	fsmeta += get_sb(segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	ovp_segments = get_cp(overprov_segment_count);
	reserved_segments = get_cp(rsvd_segment_count);

	if (!(get_sb(feature) & F2FS_FEATURE_RO) &&
			(fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 ||
			reserved_segments == 0)) {
		MSG(0, "\tWrong layout: check mkfs.f2fs version\n");
		return 1;
	}

	user_block_count = get_cp(user_block_count);
	segment_count_main = get_sb(segment_count_main) +
			((get_sb(feature) & F2FS_FEATURE_RO) ? 1 : 0);
	log_blocks_per_seg = get_sb(log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		ASSERT_MSG("\tWrong user_block_count(%u)\n", user_block_count);

		if (!f2fs_should_proceed(sb, flag))
			return 1;
		if (!c.fix_on)
			return 1;
		if (flag & (CP_FSCK_FLAG | CP_RESIZEFS_FLAG)) {
			u32 valid_user_block_cnt;
			u32 seg_cnt_main = get_sb(segment_count) -
					(get_sb(segment_count_ckpt) +
					 get_sb(segment_count_sit) +
					 get_sb(segment_count_nat) +
					 get_sb(segment_count_ssa));

			/* validate segment_count_main in sb first */
			if (seg_cnt_main != get_sb(segment_count_main)) {
				MSG(0, "Inconsistent segment_count_main %u in sb\n",
						get_sb(segment_count_main));
				return 1;
			}
			valid_user_block_cnt = ((get_sb(segment_count_main) -
					get_cp(overprov_segment_count)) * c.blks_per_seg);
			MSG(0, "Info: Fix wrong user_block_count in CP: (%u) -> (%u)\n",
					user_block_count, valid_user_block_cnt);
			set_cp(user_block_count, valid_user_block_cnt);
		}
	}
	main_segs = get_sb(segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (get_cp(cur_node_segno[i]) >= main_segs ||
			get_cp(cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (get_cp(cur_data_segno[i]) >= main_segs ||
			get_cp(cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	sit_bitmap_size = get_cp(sit_ver_bitmap_bytesize);
	nat_bitmap_size = get_cp(nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		MSG(0, "\tWrong bitmap size: sit(%u), nat(%u)\n",
			sit_bitmap_size, nat_bitmap_size);
		return 1;
	}
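
	/*
	 * Worked example for the expected sizes above: each SIT/NAT copy
	 * owns half of its segments and the version bitmap keeps one bit
	 * per block, so sit_segs == 2 yields ((2 / 2) << 9) / 8 = 64 bytes.
	 */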
	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		MSG(0, "\tWrong cp_pack_start_sum(%u) or cp_payload(%u)\n",
			cp_pack_start_sum, cp_payload);
		if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM))
			return 1;
		set_sb(cp_payload, cp_pack_start_sum - 1);
		update_superblock(sb, SB_MASK_ALL);
	}

	return 0;
}
pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start, int *pack)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
	if (pack)
		*pack = 1;

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
		block_addr += sbi->blocks_per_seg;
		if (pack)
			*pack = 2;
	}

	return block_addr;
}
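
/*
 * Worked example of the NAT ping-pong above (hypothetical numbers):
 * NAT blocks live in segment pairs, and the version bitmap picks the
 * live copy per block. With 2MB segments, block_off = 700 gives
 * seg_off = 1 and block_addr = nat_blkaddr + (1 << 9 << 1) +
 * (700 & 511) = nat_blkaddr + 1212; if bit 700 is set in nat_bitmap,
 * the live copy sits one segment later, at nat_blkaddr + 1724.
 */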
/* does not initialize nid_bitmap from the NAT */
static int f2fs_early_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct f2fs_journal *journal = &sum->journal;
	nid_t nid;
	int i;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
	if (!nm_i->nid_bitmap)
		return -ENOMEM;

	/* arbitrarily set 0 bit */
	f2fs_set_bit(0, nm_i->nid_bitmap);

	if (nats_in_cursum(journal) > NAT_JOURNAL_ENTRIES) {
		MSG(0, "\tError: f2fs_init_nid_bitmap truncate n_nats(%u) to "
			"NAT_JOURNAL_ENTRIES(%zu)\n",
			nats_in_cursum(journal), NAT_JOURNAL_ENTRIES);
		journal->n_nats = cpu_to_le16(NAT_JOURNAL_ENTRIES);
	}

	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		if (!IS_VALID_BLK_ADDR(sbi, addr)) {
			MSG(0, "\tError: f2fs_init_nid_bitmap: addr(%u) is invalid!!!\n", addr);
			journal->n_nats = cpu_to_le16(i);
			continue;
		}

		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (!IS_VALID_NID(sbi, nid)) {
			MSG(0, "\tError: f2fs_init_nid_bitmap: nid(%u) is invalid!!!\n", nid);
			journal->n_nats = cpu_to_le16(i);
			continue;
		}

		if (addr != NULL_ADDR)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}
	return 0;
}
/* initializes nid_bitmap from the NAT */
static int f2fs_late_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	block_t start_blk;
	nid_t nid;
	int ret;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nat_block = malloc(F2FS_BLKSIZE);
	if (!nat_block) {
		free(nm_i->nid_bitmap);
		return -ENOMEM;
	}

	f2fs_ra_meta_pages(sbi, 0, NAT_BLOCK_OFFSET(nm_i->max_nid),
							META_NAT);
	for (nid = 0; nid < nm_i->max_nid; nid++) {
		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
			start_blk = current_nat_addr(sbi, nid, NULL);
			ret = dev_read_block(nat_block, start_blk);
			ASSERT(ret >= 0);
		}

		if (nat_block->entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}

	free(nat_block);
	return 0;
}
u32 update_nat_bits_flags(struct f2fs_super_block *sb,
				struct f2fs_checkpoint *cp, u32 flags)
{
	uint32_t nat_bits_bytes, nat_bits_blocks;

	nat_bits_bytes = get_sb(segment_count_nat) << 5;
	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
	if (get_cp(cp_pack_total_block_count) <=
			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
		flags |= CP_NAT_BITS_FLAG;
	else
		flags &= (~CP_NAT_BITS_FLAG);

	return flags;
}
/* should call flush_journal_entries() before this */
void write_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	uint32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	uint32_t nat_bits_bytes = nat_blocks >> 3;
	uint32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
					8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct f2fs_nat_block *nat_block;
	uint32_t i, j;
	block_t blkaddr;
	int ret;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	nat_block = malloc(F2FS_BLKSIZE);
	ASSERT(nat_block);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	memset(full_nat_bits, 0, nat_bits_bytes);
	memset(empty_nat_bits, 0, nat_bits_bytes);
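
	/*
	 * Layout of the nat_bits region assembled here:
	 *
	 *   [8-byte checkpoint CRC][full_nat_bits][empty_nat_bits]
	 *
	 * with one bit per NAT block in each bitmap: "full" marks NAT
	 * blocks whose entries are all in use, "empty" those with none.
	 */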

	for (i = 0; i < nat_blocks; i++) {
		int seg_off = i >> get_sb(log_blocks_per_seg);
		int valid = 0;

		blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
				(seg_off << get_sb(log_blocks_per_seg) << 1) +
				(i & ((1 << get_sb(log_blocks_per_seg)) - 1)));

		/*
		 * Note that the new nat_blocks count can be larger than the
		 * old nm_i->nat_blocks, since nm_i->nat_bitmap is based on
		 * the old one.
		 */
		if (i < nm_i->nat_blocks && f2fs_test_bit(i, nm_i->nat_bitmap))
			blkaddr += (1 << get_sb(log_blocks_per_seg));

		ret = dev_read_block(nat_block, blkaddr);
		ASSERT(ret >= 0);

		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if ((i == 0 && j == 0) ||
				nat_block->entries[j].block_addr != NULL_ADDR)
				valid++;
		}
		if (valid == 0)
			test_and_set_bit_le(i, empty_nat_bits);
		else if (valid == NAT_ENTRY_PER_BLOCK)
			test_and_set_bit_le(i, full_nat_bits);
	}
	*(__le64 *)nat_bits = get_cp_crc(cp);
	free(nat_block);

	blkaddr = get_sb(segment0_blkaddr) + (set <<
			get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	DBG(1, "\tWriting NAT bits pages, at offset 0x%08x\n", blkaddr);

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
			ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
	}
	MSG(0, "Info: Wrote valid nat_bits into checkpoint\n");

	free(nat_bits);
}
static int check_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	uint32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	uint32_t nat_bits_bytes = nat_blocks >> 3;
	uint32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
					8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = &curseg->sum_blk->journal;
	uint32_t i, j;
	block_t blkaddr;
	int err = 0;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	blkaddr = get_sb(segment0_blkaddr) + (sbi->cur_cp <<
			get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_read_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
			ASSERT_MSG("\tError: read NAT bits from disk!!!\n");
	}

	if (*(__le64 *)nat_bits != get_cp_crc(cp) || nats_in_cursum(journal)) {
		/*
		 * if there is a journal, f2fs was not shutdown cleanly. Let's
		 * flush them with nat_bits.
		 */
		/* Otherwise, kernel will disable nat_bits */
		err = -EINVAL;
		goto out;
	}

	for (i = 0; i < nat_blocks; i++) {
		uint32_t start_nid = i * NAT_ENTRY_PER_BLOCK;
		int valid = 0;
		int empty = test_bit_le(i, empty_nat_bits);
		int full = test_bit_le(i, full_nat_bits);

		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if (f2fs_test_bit(start_nid + j, nm_i->nid_bitmap))
				valid++;
		}
		if (valid == 0) {
			if (!empty || full) {
				err = -1;
				goto out;
			}
		} else if (valid == NAT_ENTRY_PER_BLOCK) {
			if (empty || !full) {
				err = -1;
				goto out;
			}
		} else {
			if (empty || full) {
				err = -1;
				goto out;
			}
		}
	}
out:
	free(nat_bits);
	if (!err) {
		MSG(0, "Info: Checked valid nat_bits in checkpoint\n");
	} else {
		MSG(0, "Info: Corrupted valid nat_bits in checkpoint\n");
		c.fix_on = 1;
	}
	return err;
}
int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;

	nm_i->nat_blkaddr = get_sb(nat_blkaddr);

	/* segment_count_nat counts both NAT copies, so divide by 2 */
	nat_segs = get_sb(segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
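
	/*
	 * Worked example: a single NAT segment pair with 2MB segments
	 * gives nat_blocks = 1 << 9 = 512 per copy; at 455 entries per
	 * 4KB NAT block that allows max_nid = 455 * 512 = 232960 nodes.
	 */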
	nm_i->init_scan_nid = get_cp(next_free_nid);
	nm_i->next_scan_nid = get_cp(next_free_nid);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);

	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return f2fs_early_init_nid_bitmap(sbi);
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	return 0;
}
int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs;
	int start;
	char *src_bitmap, *dst_bitmap;
	unsigned char *bitmap;
	unsigned int bitmap_size;

	sit_i = malloc(sizeof(struct sit_info));
	if (!sit_i) {
		MSG(1, "\tError: Malloc failed for build_sit_info!\n");
		return -ENOMEM;
	}

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = calloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry), 1);
	if (!sit_i->sentries) {
		MSG(1, "\tError: Calloc failed for build_sit_info!\n");
		goto free_sit_info;
	}

	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE;

	if (need_fsync_data_record(sbi))
		bitmap_size += bitmap_size;

	sit_i->bitmap = calloc(bitmap_size, 1);
	if (!sit_i->bitmap) {
		MSG(1, "\tError: Calloc failed for build_sit_info!!\n");
		goto free_sentries;
	}

	bitmap = sit_i->bitmap;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

		if (need_fsync_data_record(sbi)) {
			sit_i->sentries[start].ckpt_valid_map = bitmap;
			bitmap += SIT_VBLOCK_MAP_SIZE;
		}
	}

	sit_segs = get_sb(segment_count_sit) >> 1;
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = malloc(bitmap_size);
	if (!dst_bitmap) {
		MSG(1, "\tError: Malloc failed for build_sit_info!!\n");
		goto free_validity_maps;
	}

	memcpy(dst_bitmap, src_bitmap, bitmap_size);

	sit_i->sit_base_addr = get_sb(sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = get_cp(valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = get_cp(elapsed_time);

	return 0;

free_validity_maps:
	free(sit_i->bitmap);
free_sentries:
	free(sit_i->sentries);
free_sit_info:
	free(sit_i);

	return -ENOMEM;
}
void reset_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;
	struct seg_entry *se;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	se = get_seg_entry(sbi, curseg->segno);
	se->type = type;
	se->dirty = 1;
}
static void read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int i, j, offset;
	block_t start;
	char *kaddr;
	int ret;

	start = start_sum_block(sbi);

	kaddr = malloc(F2FS_BLKSIZE);
	ASSERT(kaddr);

	ret = dev_read_block(kaddr, start++);
	ASSERT(ret >= 0);

	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);

	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);

	offset = 2 * SUM_JOURNAL_SIZE;
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		reset_curseg(sbi, i);

		if (curseg->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;
		else
			blk_off = curseg->next_blkoff;

		ASSERT(blk_off <= ENTRIES_IN_SUM);

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			curseg->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <=
					F2FS_BLKSIZE - SUM_FOOTER_SIZE)
				continue;
			memset(kaddr, 0, F2FS_BLKSIZE);
			ret = dev_read_block(kaddr, start++);
			ASSERT(ret >= 0);
			offset = 0;
		}
	}

	free(kaddr);
}
static void restore_node_summary(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_summary_block *sum_blk)
{
	struct f2fs_node *node_blk;
	struct f2fs_summary *sum_entry;
	block_t addr;
	unsigned int i;
	int ret;

	node_blk = malloc(F2FS_BLKSIZE);
	ASSERT(node_blk);

	/* scan the node segment */
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum_blk->entries[0];

	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
		ret = dev_read_block(node_blk, addr);
		ASSERT(ret >= 0);
		sum_entry->nid = node_blk->footer.nid;
		addr++;
	}
	free(node_blk);
}
static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum_blk;
	struct curseg_info *curseg;
	unsigned int segno = 0;
	block_t blk_addr = 0;
	int ret;

	if (IS_DATASEG(type)) {
		segno = get_cp(cur_data_segno[type]);
		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLKADDR(sbi, segno);
	}

	sum_blk = malloc(sizeof(*sum_blk));
	ASSERT(sum_blk);

	ret = dev_read_block(sum_blk, blk_addr);
	ASSERT(ret >= 0);

	if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
		restore_node_summary(sbi, segno, sum_blk);

	curseg = CURSEG_I(sbi, type);
	memcpy(curseg->sum_blk, sum_blk, sizeof(*sum_blk));
	reset_curseg(sbi, type);
	free(sum_blk);
}
void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
					struct f2fs_summary *sum)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_summary_block *sum_blk;
	u32 segno, offset;
	int type, ret;
	struct seg_entry *se;

	if (get_sb(feature) & F2FS_FEATURE_RO)
		return;

	segno = GET_SEGNO(sbi, blk_addr);
	offset = OFFSET_IN_SEG(sbi, blk_addr);

	se = get_seg_entry(sbi, segno);

	sum_blk = get_sum_block(sbi, segno, &type);
	memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
	sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
							SUM_TYPE_DATA;

	/* write SSA all the time */
	ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
	ASSERT(ret >= 0);

	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
					type == SEG_TYPE_MAX)
		free(sum_blk);
}
static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		read_compacted_summaries(sbi);
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		read_normal_summaries(sbi, type);
}
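
/*
 * Two summary formats exist: with CP_COMPACT_SUM_FLAG, the NAT/SIT
 * journals and the three data summaries are packed together starting
 * at start_sum_block(), so only the node summaries remain to be read
 * as regular blocks; otherwise every current segment has a normal
 * summary block of its own.
 */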
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct curseg_info *array;
	unsigned short blk_off;
	unsigned int segno;
	int i;

	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
	if (!array) {
		MSG(1, "\tError: Malloc failed for build_curseg!\n");
		return -ENOMEM;
	}

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		array[i].sum_blk = calloc(sizeof(*(array[i].sum_blk)), 1);
		if (!array[i].sum_blk) {
			MSG(1, "\tError: Calloc failed for build_curseg!!\n");
			goto seg_cleanup;
		}

		if (i <= CURSEG_COLD_DATA) {
			blk_off = get_cp(cur_data_blkoff[i]);
			segno = get_cp(cur_data_segno[i]);
		}
		if (i > CURSEG_COLD_DATA) {
			blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
			segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
		}
		ASSERT(segno < MAIN_SEGS(sbi));
		ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);

		array[i].segno = segno;
		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
		array[i].next_segno = NULL_SEGNO;
		array[i].next_blkoff = blk_off;
		array[i].alloc_type = cp->alloc_type[i];
	}
	restore_curseg_summaries(sbi);
	return 0;

seg_cleanup:
	for (--i; i >= 0; --i)
		free(array[i].sum_blk);
	free(array);

	return -ENOMEM;
}
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;

	ASSERT(segno <= end_segno);
}
static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}
void get_current_sit_page(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_sit_block *sit_blk)
{
	block_t blk_addr = current_sit_addr(sbi, segno);

	ASSERT(dev_read_block(sit_blk, blk_addr) >= 0);
}

void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_sit_block *sit_blk)
{
	block_t blk_addr = current_sit_addr(sbi, segno);

	ASSERT(dev_write_block(sit_blk, blk_addr) >= 0);
}
void check_block_count(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_sit_entry *raw_sit)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	unsigned int end_segno = sm_info->segment_count - 1;
	int valid_blocks = 0;
	unsigned int i;

	/* check segment usage */
	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
				segno, GET_SIT_VBLOCKS(raw_sit));

	/* check boundary of a given segment number */
	if (segno > end_segno)
		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);

	/* check bitmap with valid block count */
	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);

	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
			segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);

	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
			segno, GET_SIT_TYPE(raw_sit));
}
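
/*
 * Example of the rule enforced above: a SIT entry whose valid_map has
 * exactly 200 bits set must also report GET_SIT_VBLOCKS() == 200 and
 * carry a type below NO_CHECK_TYPE; any mismatch is flagged as
 * corruption.
 */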
void __seg_info_from_raw_sit(struct seg_entry *se,
		struct f2fs_sit_entry *raw_sit)
{
	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(raw_sit);
	se->orig_type = GET_SIT_TYPE(raw_sit);
	se->mtime = le64_to_cpu(raw_sit->mtime);
}
void seg_info_from_raw_sit(struct f2fs_sb_info *sbi, struct seg_entry *se,
		struct f2fs_sit_entry *raw_sit)
{
	__seg_info_from_raw_sit(se, raw_sit);

	if (!need_fsync_data_record(sbi))
		return;

	se->ckpt_valid_blocks = se->valid_blocks;
	memcpy(se->ckpt_valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_type = se->type;
}
struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	return &sit_i->sentries[segno];
}

unsigned short get_seg_vblocks(struct f2fs_sb_info *sbi, struct seg_entry *se)
{
	if (!need_fsync_data_record(sbi))
		return se->valid_blocks;
	else
		return se->ckpt_valid_blocks;
}

unsigned char *get_seg_bitmap(struct f2fs_sb_info *sbi, struct seg_entry *se)
{
	if (!need_fsync_data_record(sbi))
		return se->cur_valid_map;
	else
		return se->ckpt_valid_map;
}

unsigned char get_seg_type(struct f2fs_sb_info *sbi, struct seg_entry *se)
{
	if (!need_fsync_data_record(sbi))
		return se->type;
	else
		return se->ckpt_type;
}
struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
				unsigned int segno, int *ret_type)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum_blk;
	struct curseg_info *curseg;
	int type, ret;
	u64 ssa_blk;

	*ret_type = SEG_TYPE_MAX;

	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
		if (segno == get_cp(cur_node_segno[type])) {
			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a data "
						"segment, but should be node",
						segno);
				*ret_type = -SEG_TYPE_CUR_NODE;
			} else {
				*ret_type = SEG_TYPE_CUR_NODE;
			}
			return curseg->sum_blk;
		}
	}

	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
		if (segno == get_cp(cur_data_segno[type])) {
			curseg = CURSEG_I(sbi, type);
			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
				ASSERT_MSG("segno [0x%x] indicates a node "
						"segment, but should be data",
						segno);
				*ret_type = -SEG_TYPE_CUR_DATA;
			} else {
				*ret_type = SEG_TYPE_CUR_DATA;
			}
			return curseg->sum_blk;
		}
	}

	sum_blk = calloc(BLOCK_SZ, 1);
	ASSERT(sum_blk);

	ret = dev_read_block(sum_blk, ssa_blk);
	ASSERT(ret >= 0);

	if (IS_SUM_NODE_SEG(sum_blk->footer))
		*ret_type = SEG_TYPE_NODE;
	else if (IS_SUM_DATA_SEG(sum_blk->footer))
		*ret_type = SEG_TYPE_DATA;

	return sum_blk;
}
int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
				struct f2fs_summary *sum_entry)
{
	struct f2fs_summary_block *sum_blk;
	u32 segno, offset;
	int type;

	segno = GET_SEGNO(sbi, blk_addr);
	offset = OFFSET_IN_SEG(sbi, blk_addr);

	sum_blk = get_sum_block(sbi, segno, &type);
	memcpy(sum_entry, &(sum_blk->entries[offset]),
				sizeof(struct f2fs_summary));
	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
					type == SEG_TYPE_MAX)
		free(sum_blk);
	return type;
}
static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
				struct f2fs_nat_entry *raw_nat)
{
	struct f2fs_nat_block *nat_block;
	pgoff_t block_addr;
	int entry_off;
	int ret;

	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
		return;

	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
	ASSERT(nat_block);

	entry_off = nid % NAT_ENTRY_PER_BLOCK;
	block_addr = current_nat_addr(sbi, nid, NULL);

	ret = dev_read_block(nat_block, block_addr);
	ASSERT(ret >= 0);

	memcpy(raw_nat, &nat_block->entries[entry_off],
					sizeof(struct f2fs_nat_entry));
	free(nat_block);
}
2304 void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
2305 u16 ofs_in_node, block_t newaddr)
2307 struct f2fs_node *node_blk = NULL;
2308 struct node_info ni;
2309 block_t oldaddr, startaddr, endaddr;
2312 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
2315 get_node_info(sbi, nid, &ni);
2317 /* read node_block */
2318 ret = dev_read_block(node_blk, ni.blk_addr);
2321 /* check its block address */
2322 if (node_blk->footer.nid == node_blk->footer.ino) {
2323 int ofs = get_extra_isize(node_blk);
2325 oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs + ofs_in_node]);
2326 node_blk->i.i_addr[ofs + ofs_in_node] = cpu_to_le32(newaddr);
2327 ret = write_inode(node_blk, ni.blk_addr);
2329 } else {
2330 oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
2331 node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
2332 ret = dev_write_block(node_blk, ni.blk_addr);
2336 /* check extent cache entry */
2337 if (node_blk->footer.nid != node_blk->footer.ino) {
2338 get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
2340 /* read inode block */
2341 ret = dev_read_block(node_blk, ni.blk_addr);
2345 startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
2346 endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
2347 if (oldaddr >= startaddr && oldaddr < endaddr) {
2348 node_blk->i.i_ext.len = 0;
2350 /* update inode block */
2351 ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
2356 void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
2357 nid_t nid, block_t newaddr)
2359 struct f2fs_nat_block *nat_block;
2364 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2367 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2368 block_addr = current_nat_addr(sbi, nid, NULL);
2370 ret = dev_read_block(nat_block, block_addr);
2374 nat_block->entries[entry_off].ino = cpu_to_le32(ino);
2375 nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
2377 F2FS_FSCK(sbi)->entries[nid] = nat_block->entries[entry_off];
2379 ret = dev_write_block(nat_block, block_addr);
2384 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
2386 struct f2fs_nat_entry raw_nat;
2389 if (c.func == FSCK && F2FS_FSCK(sbi)->nr_nat_entries) {
2390 node_info_from_raw_nat(ni, &(F2FS_FSCK(sbi)->entries[nid]));
2391 return;
2392 }
2393 /* nat entry is not cached, read it */
2396 get_nat_entry(sbi, nid, &raw_nat);
2397 node_info_from_raw_nat(ni, &raw_nat);
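/*
 * Populate sit_i->sentries for every main-area segment: read the SIT blocks
 * with readahead, convert each raw entry via seg_info_from_raw_sit(), then
 * overlay the newer entries held in the SIT journal of the cold-data curseg.
 * Out-of-range journal counts and segment numbers are clamped with an error
 * message instead of being trusted.
 */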
2400 static int build_sit_entries(struct f2fs_sb_info *sbi)
2402 struct sit_info *sit_i = SIT_I(sbi);
2403 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2404 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2405 struct f2fs_sit_block *sit_blk;
2406 struct seg_entry *se;
2407 struct f2fs_sit_entry sit;
2408 int sit_blk_cnt = SIT_BLK_CNT(sbi);
2409 unsigned int i, segno, end;
2410 unsigned int readed, start_blk = 0;
2412 sit_blk = calloc(BLOCK_SZ, 1);
2413 if (!sit_blk) {
2414 MSG(1, "\tError: Calloc failed for build_sit_entries!\n");
2419 readed = f2fs_ra_meta_pages(sbi, start_blk, MAX_RA_BLOCKS,
2422 segno = start_blk * sit_i->sents_per_block;
2423 end = (start_blk + readed) * sit_i->sents_per_block;
2425 for (; segno < end && segno < MAIN_SEGS(sbi); segno++) {
2426 se = &sit_i->sentries[segno];
2428 get_current_sit_page(sbi, segno, sit_blk);
2429 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2431 check_block_count(sbi, segno, &sit);
2432 seg_info_from_raw_sit(sbi, se, &sit);
2434 start_blk += readed;
2435 } while (start_blk < sit_blk_cnt);
2440 if (sits_in_cursum(journal) > SIT_JOURNAL_ENTRIES) {
2441 MSG(0, "\tError: build_sit_entries truncate n_sits(%u) to "
2442 "SIT_JOURNAL_ENTRIES(%zu)\n",
2443 sits_in_cursum(journal), SIT_JOURNAL_ENTRIES);
2444 journal->n_sits = cpu_to_le16(SIT_JOURNAL_ENTRIES);
2448 for (i = 0; i < sits_in_cursum(journal); i++) {
2449 segno = le32_to_cpu(segno_in_journal(journal, i));
2451 if (segno >= MAIN_SEGS(sbi)) {
2452 MSG(0, "\tError: build_sit_entries: segno(%u) is invalid!!!\n", segno);
2453 journal->n_sits = cpu_to_le16(i);
2458 se = &sit_i->sentries[segno];
2459 sit = sit_in_journal(journal, i);
2461 check_block_count(sbi, segno, &sit);
2462 seg_info_from_raw_sit(sbi, se, &sit);
2467 static int early_build_segment_manager(struct f2fs_sb_info *sbi)
2469 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2470 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2471 struct f2fs_sm_info *sm_info;
2473 sm_info = malloc(sizeof(struct f2fs_sm_info));
2474 if (!sm_info) {
2475 MSG(1, "\tError: Malloc failed for build_segment_manager!\n");
2480 sbi->sm_info = sm_info;
2481 sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
2482 sm_info->main_blkaddr = get_sb(main_blkaddr);
2483 sm_info->segment_count = get_sb(segment_count);
2484 sm_info->reserved_segments = get_cp(rsvd_segment_count);
2485 sm_info->ovp_segments = get_cp(overprov_segment_count);
2486 sm_info->main_segments = get_sb(segment_count_main);
2487 sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
2489 if (build_sit_info(sbi) || build_curseg(sbi)) {
2497 static int late_build_segment_manager(struct f2fs_sb_info *sbi)
2499 if (sbi->seg_manager_done)
2500 return 1; /* this function was already called */
2502 sbi->seg_manager_done = true;
2503 if (build_sit_entries(sbi)) {
2504 free(sbi->sm_info);
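/*
 * Snapshot every segment's cur_valid_map into fsck->sit_area_bitmap and
 * total up the on-disk valid blocks and free segments, so fsck can later
 * compare the SIT view against the bitmap it rebuilds while traversing the
 * node and data blocks.
 */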
2511 void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
2513 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2514 struct f2fs_sm_info *sm_i = SM_I(sbi);
2515 unsigned int segno = 0;
2517 u32 sum_vblocks = 0;
2519 struct seg_entry *se;
2521 fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
2522 fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
2523 ASSERT(fsck->sit_area_bitmap);
2524 ptr = fsck->sit_area_bitmap;
2526 ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
2528 for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
2529 se = get_seg_entry(sbi, segno);
2531 memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2532 ptr += SIT_VBLOCK_MAP_SIZE;
2534 if (se->valid_blocks == 0x0 && is_usable_seg(sbi, segno)) {
2535 if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
2536 le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
2537 le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
2538 le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
2539 le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
2540 le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
2546 sum_vblocks += se->valid_blocks;
2549 fsck->chk.sit_valid_blocks = sum_vblocks;
2550 fsck->chk.sit_free_segs = free_segs;
2552 DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
2553 sum_vblocks, sum_vblocks,
2554 free_segs, free_segs);
2557 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
2559 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2560 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2561 struct sit_info *sit_i = SIT_I(sbi);
2562 struct f2fs_sit_block *sit_blk;
2563 unsigned int segno = 0;
2564 struct f2fs_summary_block *sum = curseg->sum_blk;
2567 sit_blk = calloc(BLOCK_SZ, 1);
2569 /* remove sit journal */
2570 sum->journal.n_sits = 0;
2572 ptr = fsck->main_area_bitmap;
2574 for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
2575 struct f2fs_sit_entry *sit;
2576 struct seg_entry *se;
2577 u16 valid_blocks = 0;
2581 get_current_sit_page(sbi, segno, sit_blk);
2582 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2583 memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2585 /* update valid block count */
2586 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
2587 valid_blocks += get_bits_in_byte(sit->valid_map[i]);
2589 se = get_seg_entry(sbi, segno);
2590 memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2591 se->valid_blocks = valid_blocks;
2592 type = se->type;
2593 if (type >= NO_CHECK_TYPE) {
2594 ASSERT_MSG("Invalid type for segno=0x%x, valid_blocks=0x%x",
2595 segno, valid_blocks);
2598 sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
2599 valid_blocks);
2600 rewrite_current_sit_page(sbi, segno, sit_blk);
2602 ptr += SIT_VBLOCK_MAP_SIZE;
2608 static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
2610 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2611 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2612 struct sit_info *sit_i = SIT_I(sbi);
2613 struct f2fs_sit_block *sit_blk;
2617 sit_blk = calloc(BLOCK_SZ, 1);
2619 for (i = 0; i < sits_in_cursum(journal); i++) {
2620 struct f2fs_sit_entry *sit;
2621 struct seg_entry *se;
2623 segno = segno_in_journal(journal, i);
2624 se = get_seg_entry(sbi, segno);
2626 get_current_sit_page(sbi, segno, sit_blk);
2627 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2629 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2630 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2631 se->valid_blocks);
2632 sit->mtime = cpu_to_le64(se->mtime);
2634 rewrite_current_sit_page(sbi, segno, sit_blk);
2638 journal->n_sits = 0;
2642 static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
2644 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2645 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2646 struct f2fs_nat_block *nat_block;
2653 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2656 if (i >= nats_in_cursum(journal)) {
2658 journal->n_nats = 0;
2662 nid = le32_to_cpu(nid_in_journal(journal, i));
2664 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2665 block_addr = current_nat_addr(sbi, nid, NULL);
2667 ret = dev_read_block(nat_block, block_addr);
2670 memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
2671 sizeof(struct f2fs_nat_entry));
2673 ret = dev_write_block(nat_block, block_addr);
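/*
 * Write every NAT and SIT journal entry cached in the current summary
 * blocks back to its on-disk NAT/SIT block, then persist the emptied
 * journals with a checkpoint if anything was flushed.
 */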
2679 void flush_journal_entries(struct f2fs_sb_info *sbi)
2681 int n_nats = flush_nat_journal_entries(sbi);
2682 int n_sits = flush_sit_journal_entries(sbi);
2684 if (n_nats || n_sits)
2685 write_checkpoints(sbi);
2688 void flush_sit_entries(struct f2fs_sb_info *sbi)
2690 struct sit_info *sit_i = SIT_I(sbi);
2691 struct f2fs_sit_block *sit_blk;
2692 unsigned int segno = 0;
2694 sit_blk = calloc(BLOCK_SZ, 1);
2696 /* update free segments */
2697 for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
2698 struct f2fs_sit_entry *sit;
2699 struct seg_entry *se;
2701 se = get_seg_entry(sbi, segno);
2706 get_current_sit_page(sbi, segno, sit_blk);
2707 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2708 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2709 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2710 se->valid_blocks);
2711 rewrite_current_sit_page(sbi, segno, sit_blk);
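/*
 * Point curseg @type at the first unused block offset of its current
 * segment, switching it to SSR-style allocation.  Sequential-only zoned
 * devices cannot be rewritten in place, hence the F2FS_ZONED_HM bail-out.
 */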
2717 int relocate_curseg_offset(struct f2fs_sb_info *sbi, int type)
2719 struct curseg_info *curseg = CURSEG_I(sbi, type);
2720 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
2723 if (c.zoned_model == F2FS_ZONED_HM)
2724 return -EINVAL;
2726 for (i = 0; i < sbi->blocks_per_seg; i++) {
2727 if (!f2fs_test_bit(i, (const char *)se->cur_valid_map))
2731 if (i == sbi->blocks_per_seg)
2732 return -EINVAL;
2734 DBG(1, "Update curseg[%d].next_blkoff %u -> %u, alloc_type %s -> SSR\n",
2735 type, curseg->next_blkoff, i,
2736 curseg->alloc_type == LFS ? "LFS" : "SSR");
2738 curseg->next_blkoff = i;
2739 curseg->alloc_type = SSR;
2744 void set_section_type(struct f2fs_sb_info *sbi, unsigned int segno, int type)
2748 if (sbi->segs_per_sec == 1)
2749 return;
2751 for (i = 0; i < sbi->segs_per_sec; i++) {
2752 struct seg_entry *se = get_seg_entry(sbi, segno + i);
2758 #ifdef HAVE_LINUX_BLKZONED_H
2760 static bool write_pointer_at_zone_start(struct f2fs_sb_info *sbi,
2761 unsigned int zone_segno)
2764 struct blk_zone blkz;
2765 block_t block = START_BLOCK(sbi, zone_segno);
2766 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
2769 if (c.zoned_model != F2FS_ZONED_HM)
2770 return true;
2772 for (j = 0; j < MAX_DEVICES; j++) {
2773 if (!c.devices[j].path)
2775 if (c.devices[j].start_blkaddr <= block &&
2776 block <= c.devices[j].end_blkaddr)
2780 if (j >= MAX_DEVICES)
2783 sector = (block - c.devices[j].start_blkaddr) << log_sectors_per_block;
2784 ret = f2fs_report_zone(j, sector, &blkz);
2788 if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
2791 return blk_zone_sector(&blkz) == blk_zone_wp_sector(&blkz);
2796 static bool write_pointer_at_zone_start(struct f2fs_sb_info *UNUSED(sbi),
2797 unsigned int UNUSED(zone_segno))
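/*
 * Scan the main area from *to (moving left or right) for the next block
 * usable by @want_type: full segments are skipped, active cursegs are
 * skipped unless the RO feature is set, and when @new_sec is requested the
 * whole section must be empty with the zone write pointer at the zone
 * start.  On success *to is left pointing at the chosen block address.
 */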
2804 int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left,
2805 int want_type, bool new_sec)
2807 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2808 struct seg_entry *se;
2812 u64 end_blkaddr = (get_sb(segment_count_main) <<
2813 get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
2817 if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
2818 not_enough = 1;
2820 while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
2821 unsigned short vblocks;
2822 unsigned char *bitmap;
2825 segno = GET_SEGNO(sbi, *to);
2826 offset = OFFSET_IN_SEG(sbi, *to);
2828 se = get_seg_entry(sbi, segno);
2830 vblocks = get_seg_vblocks(sbi, se);
2831 bitmap = get_seg_bitmap(sbi, se);
2832 type = get_seg_type(sbi, se);
2834 if (vblocks == sbi->blocks_per_seg) {
2836 *to = left ? START_BLOCK(sbi, segno) - 1:
2837 START_BLOCK(sbi, segno + 1);
2840 if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
2841 IS_CUR_SEGNO(sbi, segno))
2843 if (vblocks == 0 && not_enough)
2846 if (vblocks == 0 && !(segno % sbi->segs_per_sec)) {
2847 struct seg_entry *se2;
2850 for (i = 1; i < sbi->segs_per_sec; i++) {
2851 se2 = get_seg_entry(sbi, segno + i);
2852 if (get_seg_vblocks(sbi, se2))
2856 if (i == sbi->segs_per_sec &&
2857 write_pointer_at_zone_start(sbi, segno)) {
2858 set_section_type(sbi, segno, want_type);
2863 if (type == want_type && !new_sec &&
2864 !f2fs_test_bit(offset, (const char *)bitmap))
2867 *to = left ? *to - 1: *to + 1;
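/*
 * Relocate one current segment: flush its summary block to the old SSA
 * slot, ask find_next_free_block() for a new position starting from @from,
 * then reload segno, next_blkoff, alloc_type and the summary entries for
 * the new location.
 */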
2872 static void move_one_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left,
2873 int i)
2875 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2876 struct curseg_info *curseg = CURSEG_I(sbi, i);
2877 struct f2fs_summary_block buf;
2882 if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
2883 if (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
2884 return;
2886 if (i == CURSEG_HOT_DATA) {
2888 from = SM_I(sbi)->main_blkaddr;
2891 from = __end_block_addr(sbi);
2896 /* update original SSA too */
2897 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2898 ret = dev_write_block(curseg->sum_blk, ssa_blk);
2902 ret = find_next_free_block(sbi, &to, left, i,
2903 c.zoned_model == F2FS_ZONED_HM);
2906 old_segno = curseg->segno;
2907 curseg->segno = GET_SEGNO(sbi, to);
2908 curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
2909 curseg->alloc_type = c.zoned_model == F2FS_ZONED_HM ? LFS : SSR;
2911 /* update new segno */
2912 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2913 ret = dev_read_block(&buf, ssa_blk);
2916 memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
2918 /* update se->types */
2919 reset_curseg(sbi, i);
2921 FIX_MSG("Move curseg[%d] %x -> %x after %"PRIx64"\n",
2922 i, old_segno, curseg->segno, from);
2925 void move_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left)
2929 /* move each current segment to a new free location */
2930 for (i = 0; i < NO_CHECK_TYPE; i++)
2931 move_one_curseg_info(sbi, from, left, i);
2934 void update_curseg_info(struct f2fs_sb_info *sbi, int type)
2936 if (!relocate_curseg_offset(sbi, type))
2938 move_one_curseg_info(sbi, SM_I(sbi)->main_blkaddr, 0, type);
2941 void zero_journal_entries(struct f2fs_sb_info *sbi)
2945 for (i = 0; i < NO_CHECK_TYPE; i++)
2946 CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
2949 void write_curseg_info(struct f2fs_sb_info *sbi)
2951 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2954 for (i = 0; i < NO_CHECK_TYPE; i++) {
2955 cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
2956 if (i < CURSEG_HOT_NODE) {
2957 set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
2958 set_cp(cur_data_blkoff[i],
2959 CURSEG_I(sbi, i)->next_blkoff);
2960 } else {
2961 int n = i - CURSEG_HOT_NODE;
2963 set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
2964 set_cp(cur_node_blkoff[n],
2965 CURSEG_I(sbi, i)->next_blkoff);
2970 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
2971 struct f2fs_nat_entry *raw_nat)
2973 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2974 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2977 for (i = 0; i < nats_in_cursum(journal); i++) {
2978 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
2979 memcpy(raw_nat, &nat_in_journal(journal, i),
2980 sizeof(struct f2fs_nat_entry));
2981 DBG(3, "==> Found nid [0x%x] in nat journal\n", nid);
2988 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
2990 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2991 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2992 struct f2fs_nat_block *nat_block;
2998 /* check in journal */
2999 for (i = 0; i < nats_in_cursum(journal); i++) {
3000 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
3001 memset(&nat_in_journal(journal, i), 0,
3002 sizeof(struct f2fs_nat_entry));
3003 FIX_MSG("Remove nid [0x%x] in nat journal", nid);
3007 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
3010 entry_off = nid % NAT_ENTRY_PER_BLOCK;
3011 block_addr = current_nat_addr(sbi, nid, NULL);
3013 ret = dev_read_block(nat_block, block_addr);
3016 if (nid == F2FS_NODE_INO(sbi) || nid == F2FS_META_INO(sbi)) {
3017 FIX_MSG("nid [0x%x] block_addr= 0x%x -> 0x1", nid,
3018 le32_to_cpu(nat_block->entries[entry_off].block_addr));
3019 nat_block->entries[entry_off].block_addr = cpu_to_le32(0x1);
3021 memset(&nat_block->entries[entry_off], 0,
3022 sizeof(struct f2fs_nat_entry));
3023 FIX_MSG("Remove nid [0x%x] in NAT", nid);
3026 ret = dev_write_block(nat_block, block_addr);
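/*
 * Copy the currently valid checkpoint pack over its mirror (pack #1 <-> #2)
 * so both packs agree before fsck starts rewriting metadata; performed at
 * most once per run, tracked by sbi->cp_backuped.
 */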
3031 void duplicate_checkpoint(struct f2fs_sb_info *sbi)
3033 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3034 unsigned long long dst, src;
3036 unsigned int seg_size = 1 << get_sb(log_blocks_per_seg);
3039 if (sbi->cp_backuped)
3042 buf = malloc(F2FS_BLKSIZE * seg_size);
3045 if (sbi->cur_cp == 1) {
3046 src = get_sb(cp_blkaddr);
3047 dst = src + seg_size;
3049 dst = get_sb(cp_blkaddr);
3050 src = dst + seg_size;
3053 ret = dev_read(buf, src << F2FS_BLKSIZE_BITS,
3054 seg_size << F2FS_BLKSIZE_BITS);
3057 ret = dev_write(buf, dst << F2FS_BLKSIZE_BITS,
3058 seg_size << F2FS_BLKSIZE_BITS);
3063 ret = f2fs_fsync_device();
3066 sbi->cp_backuped = 1;
3068 MSG(0, "Info: Duplicate valid checkpoint to mirror position "
3069 "%llu -> %llu\n", src, dst);
3072 void write_checkpoint(struct f2fs_sb_info *sbi)
3074 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
3075 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3076 block_t orphan_blks = 0;
3077 unsigned long long cp_blk_no;
3078 u32 flags = CP_UMOUNT_FLAG;
3082 if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
3083 orphan_blks = __start_sum_addr(sbi) - 1;
3084 flags |= CP_ORPHAN_PRESENT_FLAG;
3086 if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
3087 flags |= CP_TRIMMED_FLAG;
3088 if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
3089 flags |= CP_DISABLED_FLAG;
3090 if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
3091 flags |= CP_LARGE_NAT_BITMAP_FLAG;
3092 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
3094 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
3097 set_cp(free_segment_count, get_free_segments(sbi));
3098 if (c.func == FSCK) {
3099 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3101 set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
3102 set_cp(valid_node_count, fsck->chk.valid_node_cnt);
3103 set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
3105 set_cp(valid_block_count, sbi->total_valid_block_count);
3106 set_cp(valid_node_count, sbi->total_valid_node_count);
3107 set_cp(valid_inode_count, sbi->total_valid_inode_count);
3109 set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
3111 flags = update_nat_bits_flags(sb, cp, flags);
3112 set_cp(ckpt_flags, flags);
3114 crc = f2fs_checkpoint_chksum(cp);
3115 *((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
3116 cpu_to_le32(crc);
3118 cp_blk_no = get_sb(cp_blkaddr);
3119 if (sbi->cur_cp == 2)
3120 cp_blk_no += 1 << get_sb(log_blocks_per_seg);
3122 /* write the first cp */
3123 ret = dev_write_block(cp, cp_blk_no++);
3127 cp_blk_no += get_sb(cp_payload);
3128 /* skip orphan blocks */
3129 cp_blk_no += orphan_blks;
3131 /* update summary blocks having nullified journal entries */
3132 for (i = 0; i < NO_CHECK_TYPE; i++) {
3133 struct curseg_info *curseg = CURSEG_I(sbi, i);
3136 ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
3139 if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
3140 /* update original SSA too */
3141 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
3142 ret = dev_write_block(curseg->sum_blk, ssa_blk);
3147 /* Write nat bits */
3148 if (flags & CP_NAT_BITS_FLAG)
3149 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
3151 /* in case of sudden power off */
3152 ret = f2fs_fsync_device();
3155 /* write the last cp */
3156 ret = dev_write_block(cp, cp_blk_no++);
3159 ret = f2fs_fsync_device();
3163 void write_checkpoints(struct f2fs_sb_info *sbi)
3165 /* copy valid checkpoint to its mirror position */
3166 duplicate_checkpoint(sbi);
3168 /* repair checkpoint at CP #0 position */
3170 write_checkpoint(sbi);
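/*
 * Load the whole NAT area (plus the NAT journal) into fsck's private cache:
 * fsck->entries[] mirrors every raw NAT entry and nat_area_bitmap gets one
 * bit per nid that currently owns a block, so the node-tree walk can
 * cross-check and later repair stale entries.
 */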
3173 void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
3175 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3176 struct f2fs_journal *journal = &curseg->sum_blk->journal;
3177 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3178 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3179 struct f2fs_nm_info *nm_i = NM_I(sbi);
3180 struct f2fs_nat_block *nat_block;
3181 struct node_info ni;
3182 u32 nid, nr_nat_blks;
3189 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
3192 /* Alloc & build nat entry bitmap */
3193 nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
3194 sbi->log_blocks_per_seg;
3196 fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
3197 fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
3198 fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
3199 ASSERT(fsck->nat_area_bitmap);
3201 fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
3202 fsck->nr_nat_entries);
3203 ASSERT(fsck->entries);
3205 for (block_off = 0; block_off < nr_nat_blks; block_off++) {
3207 seg_off = block_off >> sbi->log_blocks_per_seg;
3208 block_addr = (pgoff_t)(nm_i->nat_blkaddr +
3209 (seg_off << sbi->log_blocks_per_seg << 1) +
3210 (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
3212 if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
3213 block_addr += sbi->blocks_per_seg;
3215 ret = dev_read_block(nat_block, block_addr);
3218 nid = block_off * NAT_ENTRY_PER_BLOCK;
3219 for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
3222 if ((nid + i) == F2FS_NODE_INO(sbi) ||
3223 (nid + i) == F2FS_META_INO(sbi)) {
3225 * block_addr of node/meta inode should be 0x1.
3226 * Set this bit, and fsck_verify will fix it.
3228 if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
3229 ASSERT_MSG("\tError: ino[0x%x] block_addr[0x%x] is invalid\n",
3230 nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
3231 f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3236 node_info_from_raw_nat(&ni, &nat_block->entries[i]);
3237 if (ni.blk_addr == 0x0)
3239 if (ni.ino == 0x0) {
3240 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3241 " is invalid\n", ni.ino, ni.blk_addr);
3243 if (ni.ino == (nid + i)) {
3244 fsck->nat_valid_inode_cnt++;
3245 DBG(3, "ino[0x%8x] is probably an inode\n", ni.ino);
3249 * nat entry [0] must be null. If
3250 * it is corrupted, set its bit in
3251 * nat_area_bitmap, fsck_verify will
3252 * fix it.
3253 */
3254 ASSERT_MSG("Invalid nat entry[0]: "
3255 "blk_addr[0x%x]\n", ni.blk_addr);
3256 fsck->chk.valid_nat_entry_cnt--;
3259 DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
3260 nid + i, ni.blk_addr, ni.ino);
3261 f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3262 fsck->chk.valid_nat_entry_cnt++;
3264 fsck->entries[nid + i] = nat_block->entries[i];
3268 /* Traverse nat journal, update the corresponding entries */
3269 for (i = 0; i < nats_in_cursum(journal); i++) {
3270 struct f2fs_nat_entry raw_nat;
3271 nid = le32_to_cpu(nid_in_journal(journal, i));
3274 DBG(3, "==> Found nid [0x%x] in nat journal, update it\n", nid);
3276 /* Clear the original bit and count */
3277 if (fsck->entries[nid].block_addr != 0x0) {
3278 fsck->chk.valid_nat_entry_cnt--;
3279 f2fs_clear_bit(nid, fsck->nat_area_bitmap);
3280 if (fsck->entries[nid].ino == nid)
3281 fsck->nat_valid_inode_cnt--;
3284 /* Use nat entries in journal */
3285 memcpy(&raw_nat, &nat_in_journal(journal, i),
3286 sizeof(struct f2fs_nat_entry));
3287 node_info_from_raw_nat(&ni, &raw_nat);
3288 if (ni.blk_addr != 0x0) {
3289 if (ni.ino == 0x0)
3290 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3291 " is invalid\n", ni.ino, ni.blk_addr);
3292 if (ni.ino == nid) {
3293 fsck->nat_valid_inode_cnt++;
3294 DBG(3, "ino[0x%8x] is probably an inode\n", ni.ino);
3296 f2fs_set_bit(nid, fsck->nat_area_bitmap);
3297 fsck->chk.valid_nat_entry_cnt++;
3298 DBG(3, "nid[0x%x] in nat cache\n", nid);
3300 fsck->entries[nid] = raw_nat;
3304 DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
3305 fsck->chk.valid_nat_entry_cnt,
3306 fsck->chk.valid_nat_entry_cnt);
3309 static int check_sector_size(struct f2fs_super_block *sb)
3311 uint32_t log_sectorsize, log_sectors_per_block;
3313 log_sectorsize = log_base_2(c.sector_size);
3314 log_sectors_per_block = log_base_2(c.sectors_per_blk);
3316 if (log_sectorsize == get_sb(log_sectorsize) &&
3317 log_sectors_per_block == get_sb(log_sectors_per_block))
3320 set_sb(log_sectorsize, log_sectorsize);
3321 set_sb(log_sectors_per_block, log_sectors_per_block);
3323 update_superblock(sb, SB_MASK_ALL);
3327 static int tune_sb_features(struct f2fs_sb_info *sbi)
3330 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3332 if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) &&
3333 c.feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
3334 sb->feature |= cpu_to_le32(F2FS_FEATURE_ENCRYPT);
3335 MSG(0, "Info: Set Encryption feature\n");
3338 if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) &&
3339 c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
3340 if (!c.s_encoding) {
3341 ERR_MSG("ERROR: Must specify encoding to enable casefolding.\n");
3344 sb->feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
3345 MSG(0, "Info: Set Casefold feature\n");
3348 /* TODO: quota needs to allocate inode numbers */
3350 c.feature = sb->feature;
3354 update_superblock(sb, SB_MASK_ALL);
3358 static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
3359 nid_t ino)
3361 struct fsync_inode_entry *entry;
3363 list_for_each_entry(entry, head, list)
3364 if (entry->ino == ino)
3370 static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
3371 nid_t ino)
3373 struct fsync_inode_entry *entry;
3375 entry = calloc(sizeof(struct fsync_inode_entry), 1);
3379 list_add_tail(&entry->list, head);
3383 static void del_fsync_inode(struct fsync_inode_entry *entry)
3385 list_del(&entry->list);
3389 static void destroy_fsync_dnodes(struct list_head *head)
3391 struct fsync_inode_entry *entry, *tmp;
3393 list_for_each_entry_safe(entry, tmp, head, list)
3394 del_fsync_inode(entry);
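/*
 * Walk the warm-node log from the checkpointed position and collect the
 * inodes that wrote fsync'ed dnodes into @head.  The walk stops at the
 * first non-recoverable dnode and bounds the number of visited blocks to
 * defend against looped node chains.
 */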
3397 static int find_fsync_inode(struct f2fs_sb_info *sbi, struct list_head *head)
3399 struct curseg_info *curseg;
3400 struct f2fs_node *node_blk;
3402 unsigned int loop_cnt = 0;
3403 unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
3404 sbi->total_valid_block_count;
3407 /* get node pages in the current segment */
3408 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3409 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3411 node_blk = calloc(F2FS_BLKSIZE, 1);
3415 struct fsync_inode_entry *entry;
3417 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3420 err = dev_read_block(node_blk, blkaddr);
3424 if (!is_recoverable_dnode(sbi, node_blk))
3427 if (!is_fsync_dnode(node_blk))
3430 entry = get_fsync_inode(head, ino_of_node(node_blk));
3432 entry = add_fsync_inode(head, ino_of_node(node_blk));
3438 entry->blkaddr = blkaddr;
3440 if (IS_INODE(node_blk) && is_dent_dnode(node_blk))
3441 entry->last_dentry = blkaddr;
3443 /* sanity check in order to detect looped node chain */
3444 if (++loop_cnt >= free_blocks ||
3445 blkaddr == next_blkaddr_of_node(node_blk)) {
3446 MSG(0, "\tdetect looped node chain, blkaddr:%u, next:%u\n",
3447 blkaddr,
3448 next_blkaddr_of_node(node_blk));
3453 blkaddr = next_blkaddr_of_node(node_blk);
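/*
 * Mark the blocks referenced by one recoverable dnode as valid in the
 * ckpt_valid_map of their segments: first the node block itself, then every
 * data block address it carries (inline-data inodes and xattr nodes are
 * skipped), so fsync'ed-but-not-checkpointed data is treated as in use.
 */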
3460 static int do_record_fsync_data(struct f2fs_sb_info *sbi,
3461 struct f2fs_node *node_blk,
3462 block_t blkaddr)
3464 unsigned int segno, offset;
3465 struct seg_entry *se;
3466 unsigned int ofs_in_node = 0;
3467 unsigned int start, end;
3468 int err = 0, recorded = 0;
3470 segno = GET_SEGNO(sbi, blkaddr);
3471 se = get_seg_entry(sbi, segno);
3472 offset = OFFSET_IN_SEG(sbi, blkaddr);
3474 if (f2fs_test_bit(offset, (char *)se->cur_valid_map)) {
3478 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map)) {
3483 if (!se->ckpt_valid_blocks)
3484 se->ckpt_type = CURSEG_WARM_NODE;
3486 se->ckpt_valid_blocks++;
3487 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3489 MSG(1, "do_record_fsync_data: [node] ino = %u, nid = %u, blkaddr = %u\n",
3490 ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3493 if (IS_INODE(node_blk) && (node_blk->i.i_inline & F2FS_INLINE_DATA))
3496 if (ofs_of_node(node_blk) == XATTR_NODE_OFFSET)
3499 /* step 3: recover data indices */
3500 start = start_bidx_of_node(ofs_of_node(node_blk), node_blk);
3501 end = start + ADDRS_PER_PAGE(sbi, node_blk, NULL);
3503 for (; start < end; start++, ofs_in_node++) {
3504 blkaddr = datablock_addr(node_blk, ofs_in_node);
3506 if (!is_valid_data_blkaddr(blkaddr))
3509 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) {
3514 segno = GET_SEGNO(sbi, blkaddr);
3515 se = get_seg_entry(sbi, segno);
3516 offset = OFFSET_IN_SEG(sbi, blkaddr);
3518 if (f2fs_test_bit(offset, (char *)se->cur_valid_map))
3519 continue;
3520 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map))
3521 continue;
3523 if (!se->ckpt_valid_blocks)
3524 se->ckpt_type = CURSEG_WARM_DATA;
3526 se->ckpt_valid_blocks++;
3527 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3529 MSG(1, "do_record_fsync_data: [data] ino = %u, nid = %u, blkaddr = %u\n",
3530 ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3535 MSG(1, "recover_data: ino = %u, nid = %u, recorded = %d, err = %d\n",
3536 ino_of_node(node_blk), ofs_of_node(node_blk),
3541 static int traverse_dnodes(struct f2fs_sb_info *sbi,
3542 struct list_head *inode_list)
3544 struct curseg_info *curseg;
3545 struct f2fs_node *node_blk;
3549 /* get node pages in the current segment */
3550 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3551 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3553 node_blk = calloc(F2FS_BLKSIZE, 1);
3557 struct fsync_inode_entry *entry;
3559 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3562 err = dev_read_block(node_blk, blkaddr);
3566 if (!is_recoverable_dnode(sbi, node_blk))
3569 entry = get_fsync_inode(inode_list,
3570 ino_of_node(node_blk));
3574 err = do_record_fsync_data(sbi, node_blk, blkaddr);
3578 if (entry->blkaddr == blkaddr)
3579 del_fsync_inode(entry);
3581 blkaddr = next_blkaddr_of_node(node_blk);
3588 static int record_fsync_data(struct f2fs_sb_info *sbi)
3590 struct list_head inode_list = LIST_HEAD_INIT(inode_list);
3593 if (!need_fsync_data_record(sbi))
3596 ret = find_fsync_inode(sbi, &inode_list);
3600 ret = late_build_segment_manager(sbi);
3602 ERR_MSG("late_build_segment_manager failed\n");
3606 ret = traverse_dnodes(sbi, &inode_list);
3608 destroy_fsync_dnodes(&inode_list);
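/*
 * Bring the image up for fsck/dump/sload: validate a superblock, pick and
 * sanity-check a valid checkpoint, optionally tune superblock features and
 * the version timestamp, then build the segment and node managers and
 * replay the fsync records written after the last checkpoint.
 */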
3612 int f2fs_do_mount(struct f2fs_sb_info *sbi)
3614 struct f2fs_checkpoint *cp = NULL;
3615 struct f2fs_super_block *sb = NULL;
3618 sbi->active_logs = NR_CURSEG_TYPE;
3619 ret = validate_super_block(sbi, SB0_ADDR);
3621 ret = validate_super_block(sbi, SB1_ADDR);
3625 sb = F2FS_RAW_SUPER(sbi);
3627 ret = check_sector_size(sb);
3631 print_raw_sb_info(sb);
3635 ret = get_valid_checkpoint(sbi);
3637 ERR_MSG("Can't find valid checkpoint\n");
3643 if (sanity_check_ckpt(sbi)) {
3644 ERR_MSG("Checkpoint is polluted\n");
3647 cp = F2FS_CKPT(sbi);
3649 if (c.func != FSCK && c.func != DUMP &&
3650 !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
3651 ERR_MSG("Mount unclean image to replay log first\n");
3655 if (c.func == FSCK) {
3656 #if defined(__APPLE__)
3657 if (!c.no_kernel_check &&
3658 memcmp(c.sb_version, c.version, VERSION_NAME_LEN)) {
3661 memcpy(sbi->raw_super->version,
3662 c.version, VERSION_NAME_LEN);
3663 update_superblock(sbi->raw_super, SB_MASK_ALL);
3666 if (!c.no_kernel_check) {
3667 u32 prev_time, cur_time, time_diff;
3668 __le32 *ver_ts_ptr = (__le32 *)(sbi->raw_super->version
3669 + VERSION_NAME_LEN);
3671 cur_time = (u32)get_cp(elapsed_time);
3672 prev_time = le32_to_cpu(*ver_ts_ptr);
3674 MSG(0, "Info: version timestamp cur: %u, prev: %u\n",
3675 cur_time, prev_time);
3676 if (!memcmp(c.sb_version, c.version,
3677 VERSION_NAME_LEN)) {
3678 /* valid prev_time */
3679 if (prev_time != 0 && cur_time > prev_time) {
3680 time_diff = cur_time - prev_time;
3681 if (time_diff < CHECK_PERIOD)
3687 memcpy(sbi->raw_super->version,
3688 c.version, VERSION_NAME_LEN);
3691 *ver_ts_ptr = cpu_to_le32(cur_time);
3692 update_superblock(sbi->raw_super, SB_MASK_ALL);
3697 print_ckpt_info(sbi);
3700 if (get_cp(ckpt_flags) & CP_QUOTA_NEED_FSCK_FLAG)
3706 if (tune_sb_features(sbi))
3709 /* precompute checksum seed for metadata */
3710 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
3711 c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
3713 sbi->total_valid_node_count = get_cp(valid_node_count);
3714 sbi->total_valid_inode_count = get_cp(valid_inode_count);
3715 sbi->user_block_count = get_cp(user_block_count);
3716 sbi->total_valid_block_count = get_cp(valid_block_count);
3717 sbi->last_valid_block_count = sbi->total_valid_block_count;
3718 sbi->alloc_valid_block_count = 0;
3720 if (early_build_segment_manager(sbi)) {
3721 ERR_MSG("early_build_segment_manager failed\n");
3725 if (build_node_manager(sbi)) {
3726 ERR_MSG("build_node_manager failed\n");
3730 if (record_fsync_data(sbi)) {
3731 ERR_MSG("record_fsync_data failed\n");
3735 if (!f2fs_should_proceed(sb, get_cp(ckpt_flags)))
3738 if (late_build_segment_manager(sbi) < 0) {
3739 ERR_MSG("late_build_segment_manager failed\n");
3743 if (f2fs_late_init_nid_bitmap(sbi)) {
3744 ERR_MSG("f2fs_late_init_nid_bitmap failed\n");
3748 /* Check nat_bits */
3749 if (c.func == FSCK && is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
3750 if (check_nat_bits(sbi, sb, cp) && c.fix_on)
3751 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
3756 void f2fs_do_umount(struct f2fs_sb_info *sbi)
3758 struct sit_info *sit_i = SIT_I(sbi);
3759 struct f2fs_sm_info *sm_i = SM_I(sbi);
3760 struct f2fs_nm_info *nm_i = NM_I(sbi);
3764 if (c.func == SLOAD || c.func == FSCK)
3765 free(nm_i->nid_bitmap);
3766 free(nm_i->nat_bitmap);
3770 free(sit_i->bitmap);
3771 free(sit_i->sit_bitmap);
3772 free(sit_i->sentries);
3773 free(sm_i->sit_info);
3776 for (i = 0; i < NR_CURSEG_TYPE; i++)
3777 free(sm_i->curseg_array[i].sum_blk);
3779 free(sm_i->curseg_array);
3783 free(sbi->raw_super);
3787 int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi)
3789 struct f2fs_super_block *sb = sbi->raw_super;
3790 uint32_t sit_seg_count, sit_size;
3791 uint32_t nat_seg_count, nat_size;
3792 uint64_t sit_seg_addr, nat_seg_addr, payload_addr;
3793 uint32_t seg_size = 1 << get_sb(log_blocks_per_seg);
3799 sit_seg_addr = get_sb(sit_blkaddr);
3800 sit_seg_count = get_sb(segment_count_sit);
3801 sit_size = sit_seg_count * seg_size;
3803 DBG(1, "\tSparse: filling sit area at block offset: 0x%08"PRIx64" len: %u\n",
3804 sit_seg_addr, sit_size);
3805 ret = dev_fill(NULL, sit_seg_addr * F2FS_BLKSIZE,
3806 sit_size * F2FS_BLKSIZE);
3808 MSG(1, "\tError: While zeroing out the sit area "
3813 nat_seg_addr = get_sb(nat_blkaddr);
3814 nat_seg_count = get_sb(segment_count_nat);
3815 nat_size = nat_seg_count * seg_size;
3817 DBG(1, "\tSparse: filling nat area at block offset 0x%08"PRIx64" len: %u\n",
3818 nat_seg_addr, nat_size);
3819 ret = dev_fill(NULL, nat_seg_addr * F2FS_BLKSIZE,
3820 nat_size * F2FS_BLKSIZE);
3822 MSG(1, "\tError: While zeroing out the nat area "
3827 payload_addr = get_sb(segment0_blkaddr) + 1;
3829 DBG(1, "\tSparse: filling bitmap area at block offset 0x%08"PRIx64" len: %u\n",
3830 payload_addr, get_sb(cp_payload));
3831 ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
3832 get_sb(cp_payload) * F2FS_BLKSIZE);
3834 MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
3839 payload_addr += seg_size;
3841 DBG(1, "\tSparse: filling bitmap area at block offset 0x%08"PRIx64" len: %u\n",
3842 payload_addr, get_sb(cp_payload));
3843 ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
3844 get_sb(cp_payload) * F2FS_BLKSIZE);
3846 MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
3853 int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi) { return 0; }