fsck.f2fs: fix to check validation of i_xattr_nid
[platform/upstream/f2fs-tools.git] / fsck / mount.c
1 /**
2  * mount.c
3  *
4  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include "fsck.h"
12 #include "node.h"
13 #include "xattr.h"
14 #include <locale.h>
15 #include <stdbool.h>
16 #ifdef HAVE_LINUX_POSIX_ACL_H
17 #include <linux/posix_acl.h>
18 #endif
19 #ifdef HAVE_SYS_ACL_H
20 #include <sys/acl.h>
21 #endif
22
23 #ifndef ACL_UNDEFINED_TAG
24 #define ACL_UNDEFINED_TAG       (0x00)
25 #define ACL_USER_OBJ            (0x01)
26 #define ACL_USER                (0x02)
27 #define ACL_GROUP_OBJ           (0x04)
28 #define ACL_GROUP               (0x08)
29 #define ACL_MASK                (0x10)
30 #define ACL_OTHER               (0x20)
31 #endif
32
33 u32 get_free_segments(struct f2fs_sb_info *sbi)
34 {
35         u32 i, free_segs = 0;
36
37         for (i = 0; i < TOTAL_SEGS(sbi); i++) {
38                 struct seg_entry *se = get_seg_entry(sbi, i);
39
40                 if (se->valid_blocks == 0x0 && !IS_CUR_SEGNO(sbi, i))
41                         free_segs++;
42         }
43         return free_segs;
44 }
45
46 void update_free_segments(struct f2fs_sb_info *sbi)
47 {
48         char *progress = "-*|*-";
49         static int i = 0;
50
51         if (c.dbg_lv)
52                 return;
53
54         MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
55         fflush(stdout);
56         i++;
57 }
58
59 #if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
60 static void print_acl(const u8 *value, int size)
61 {
62         const struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
63         const struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
64         const u8 *end = value + size;
65         int i, count;
66
67         if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION)) {
68                 MSG(0, "Invalid ACL version [0x%x : 0x%x]\n",
69                                 le32_to_cpu(hdr->a_version), F2FS_ACL_VERSION);
70                 return;
71         }
72
73         count = f2fs_acl_count(size);
74         if (count <= 0) {
75                 MSG(0, "Invalid ACL value size %d\n", size);
76                 return;
77         }
78
79         for (i = 0; i < count; i++) {
80                 if ((u8 *)entry > end) {
81                         MSG(0, "Invalid ACL entries count %d\n", count);
82                         return;
83                 }
84
85                 switch (le16_to_cpu(entry->e_tag)) {
86                 case ACL_USER_OBJ:
87                 case ACL_GROUP_OBJ:
88                 case ACL_MASK:
89                 case ACL_OTHER:
90                         MSG(0, "tag:0x%x perm:0x%x\n",
91                                         le16_to_cpu(entry->e_tag),
92                                         le16_to_cpu(entry->e_perm));
93                         entry = (struct f2fs_acl_entry *)((char *)entry +
94                                         sizeof(struct f2fs_acl_entry_short));
95                         break;
96                 case ACL_USER:
97                         MSG(0, "tag:0x%x perm:0x%x uid:%u\n",
98                                         le16_to_cpu(entry->e_tag),
99                                         le16_to_cpu(entry->e_perm),
100                                         le32_to_cpu(entry->e_id));
101                         entry = (struct f2fs_acl_entry *)((char *)entry +
102                                         sizeof(struct f2fs_acl_entry));
103                         break;
104                 case ACL_GROUP:
105                         MSG(0, "tag:0x%x perm:0x%x gid:%u\n",
106                                         le16_to_cpu(entry->e_tag),
107                                         le16_to_cpu(entry->e_perm),
108                                         le32_to_cpu(entry->e_id));
109                         entry = (struct f2fs_acl_entry *)((char *)entry +
110                                         sizeof(struct f2fs_acl_entry));
111                         break;
112                 default:
113                         MSG(0, "Unknown ACL tag 0x%x\n",
114                                         le16_to_cpu(entry->e_tag));
115                         return;
116                 }
117         }
118 }
119 #endif /* HAVE_LINUX_POSIX_ACL_H || HAVE_SYS_ACL_H */
120
121 static void print_xattr_entry(const struct f2fs_xattr_entry *ent)
122 {
123         const u8 *value = (const u8 *)&ent->e_name[ent->e_name_len];
124         const int size = le16_to_cpu(ent->e_value_size);
125         const struct fscrypt_context *ctx;
126         int i;
127
128         MSG(0, "\nxattr: e_name_index:%d e_name:", ent->e_name_index);
129         for (i = 0; i < ent->e_name_len; i++)
130                 MSG(0, "%c", ent->e_name[i]);
131         MSG(0, " e_name_len:%d e_value_size:%d e_value:\n",
132                         ent->e_name_len, size);
133
134         switch (ent->e_name_index) {
135 #if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
136         case F2FS_XATTR_INDEX_POSIX_ACL_ACCESS:
137         case F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT:
138                 print_acl(value, size);
139                 return;
140 #endif
141         case F2FS_XATTR_INDEX_ENCRYPTION:
142                 ctx = (const struct fscrypt_context *)value;
143                 if (size != sizeof(*ctx) ||
144                     ctx->format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
145                         break;
146                 MSG(0, "format: %d\n", ctx->format);
147                 MSG(0, "contents_encryption_mode: 0x%x\n", ctx->contents_encryption_mode);
148                 MSG(0, "filenames_encryption_mode: 0x%x\n", ctx->filenames_encryption_mode);
149                 MSG(0, "flags: 0x%x\n", ctx->flags);
150                 MSG(0, "master_key_descriptor: ");
151                 for (i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
152                         MSG(0, "%02X", ctx->master_key_descriptor[i]);
153                 MSG(0, "\nnonce: ");
154                 for (i = 0; i < FS_KEY_DERIVATION_NONCE_SIZE; i++)
155                         MSG(0, "%02X", ctx->nonce[i]);
156                 MSG(0, "\n");
157                 return;
158         }
159         for (i = 0; i < size; i++)
160                 MSG(0, "%02X", value[i]);
161         MSG(0, "\n");
162 }
163
164 void print_inode_info(struct f2fs_sb_info *sbi,
165                         struct f2fs_node *node, int name)
166 {
167         struct f2fs_inode *inode = &node->i;
168         void *xattr_addr;
169         struct f2fs_xattr_entry *ent;
170         char en[F2FS_PRINT_NAMELEN];
171         unsigned int i = 0;
172         u32 namelen = le32_to_cpu(inode->i_namelen);
173         int enc_name = file_enc_name(inode);
174         int ofs = get_extra_isize(node);
175
176         pretty_print_filename(inode->i_name, namelen, en, enc_name);
177         if (name && en[0]) {
178                 MSG(0, " - File name         : %s%s\n", en,
179                                 enc_name ? " <encrypted>" : "");
180                 setlocale(LC_ALL, "");
181                 MSG(0, " - File size         : %'llu (bytes)\n",
182                                 le64_to_cpu(inode->i_size));
183                 return;
184         }
185
186         DISP_u32(inode, i_mode);
187         DISP_u32(inode, i_advise);
188         DISP_u32(inode, i_uid);
189         DISP_u32(inode, i_gid);
190         DISP_u32(inode, i_links);
191         DISP_u64(inode, i_size);
192         DISP_u64(inode, i_blocks);
193
194         DISP_u64(inode, i_atime);
195         DISP_u32(inode, i_atime_nsec);
196         DISP_u64(inode, i_ctime);
197         DISP_u32(inode, i_ctime_nsec);
198         DISP_u64(inode, i_mtime);
199         DISP_u32(inode, i_mtime_nsec);
200
201         DISP_u32(inode, i_generation);
202         DISP_u32(inode, i_current_depth);
203         DISP_u32(inode, i_xattr_nid);
204         DISP_u32(inode, i_flags);
205         DISP_u32(inode, i_inline);
206         DISP_u32(inode, i_pino);
207         DISP_u32(inode, i_dir_level);
208
209         if (en[0]) {
210                 DISP_u32(inode, i_namelen);
211                 printf("%-30s\t\t[%s]\n", "i_name", en);
212         }
213
214         printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
215                         le32_to_cpu(inode->i_ext.fofs),
216                         le32_to_cpu(inode->i_ext.blk_addr),
217                         le32_to_cpu(inode->i_ext.len));
218
219         if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
220                 DISP_u16(inode, i_extra_isize);
221                 if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
222                         DISP_u16(inode, i_inline_xattr_size);
223                 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
224                         DISP_u32(inode, i_projid);
225                 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
226                         DISP_u32(inode, i_inode_checksum);
227                 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
228                         DISP_u64(inode, i_crtime);
229                         DISP_u32(inode, i_crtime_nsec);
230                 }
231                 if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
232                         DISP_u64(inode, i_compr_blocks);
233                         DISP_u32(inode, i_compress_algrithm);
234                         DISP_u32(inode, i_log_cluster_size);
235                         DISP_u32(inode, i_padding);
236                 }
237         }
238
239         for (i = 0; i < ADDRS_PER_INODE(inode); i++) {
240                 block_t blkaddr = le32_to_cpu(inode->i_addr[i + ofs]);
241                 char *flag = "";
242
243                 if (blkaddr == 0x0)
244                         continue;
245                 if (blkaddr == COMPRESS_ADDR)
246                         flag = "cluster flag";
247                 else if (blkaddr == NEW_ADDR)
248                         flag = "reserved flag";
249                 printf("i_addr[0x%x] %-16s\t\t[0x%8x : %u]\n", i + ofs, flag,
250                                 blkaddr, blkaddr);
251         }
252
253         DISP_u32(inode, i_nid[0]);      /* direct */
254         DISP_u32(inode, i_nid[1]);      /* direct */
255         DISP_u32(inode, i_nid[2]);      /* indirect */
256         DISP_u32(inode, i_nid[3]);      /* indirect */
257         DISP_u32(inode, i_nid[4]);      /* double indirect */
258
259         xattr_addr = read_all_xattrs(sbi, node);
260         if (xattr_addr) {
261                 list_for_each_xattr(ent, xattr_addr) {
262                         print_xattr_entry(ent);
263                 }
264                 free(xattr_addr);
265         }
266
267         printf("\n");
268 }
269
270 void print_node_info(struct f2fs_sb_info *sbi,
271                         struct f2fs_node *node_block, int verbose)
272 {
273         nid_t ino = le32_to_cpu(node_block->footer.ino);
274         nid_t nid = le32_to_cpu(node_block->footer.nid);
275         /* Is this an inode? */
276         if (ino == nid) {
277                 DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
278                 print_inode_info(sbi, node_block, verbose);
279         } else {
280                 int i;
281                 u32 *dump_blk = (u32 *)node_block;
282                 DBG(verbose,
283                         "Node ID [0x%x:%u] is direct node or indirect node.\n",
284                                                                 nid, nid);
285                 for (i = 0; i < DEF_ADDRS_PER_BLOCK; i++)
286                         MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
287                                                 i, dump_blk[i], dump_blk[i]);
288         }
289 }
290
291 static void DISP_label(u_int16_t *name)
292 {
293         char buffer[MAX_VOLUME_NAME];
294
295         utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
296         printf("%-30s" "\t\t[%s]\n", "volume_name", buffer);
297 }
298
299 void print_raw_sb_info(struct f2fs_super_block *sb)
300 {
301         if (!c.dbg_lv)
302                 return;
303
304         printf("\n");
305         printf("+--------------------------------------------------------+\n");
306         printf("| Super block                                            |\n");
307         printf("+--------------------------------------------------------+\n");
308
309         DISP_u32(sb, magic);
310         DISP_u32(sb, major_ver);
311
312         DISP_label(sb->volume_name);
313
314         DISP_u32(sb, minor_ver);
315         DISP_u32(sb, log_sectorsize);
316         DISP_u32(sb, log_sectors_per_block);
317
318         DISP_u32(sb, log_blocksize);
319         DISP_u32(sb, log_blocks_per_seg);
320         DISP_u32(sb, segs_per_sec);
321         DISP_u32(sb, secs_per_zone);
322         DISP_u32(sb, checksum_offset);
323         DISP_u64(sb, block_count);
324
325         DISP_u32(sb, section_count);
326         DISP_u32(sb, segment_count);
327         DISP_u32(sb, segment_count_ckpt);
328         DISP_u32(sb, segment_count_sit);
329         DISP_u32(sb, segment_count_nat);
330
331         DISP_u32(sb, segment_count_ssa);
332         DISP_u32(sb, segment_count_main);
333         DISP_u32(sb, segment0_blkaddr);
334
335         DISP_u32(sb, cp_blkaddr);
336         DISP_u32(sb, sit_blkaddr);
337         DISP_u32(sb, nat_blkaddr);
338         DISP_u32(sb, ssa_blkaddr);
339         DISP_u32(sb, main_blkaddr);
340
341         DISP_u32(sb, root_ino);
342         DISP_u32(sb, node_ino);
343         DISP_u32(sb, meta_ino);
344         DISP_u32(sb, cp_payload);
345         DISP_u32(sb, crc);
346         DISP("%-.256s", sb, version);
347         printf("\n");
348 }
349
350 void print_ckpt_info(struct f2fs_sb_info *sbi)
351 {
352         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
353
354         if (!c.dbg_lv)
355                 return;
356
357         printf("\n");
358         printf("+--------------------------------------------------------+\n");
359         printf("| Checkpoint                                             |\n");
360         printf("+--------------------------------------------------------+\n");
361
362         DISP_u64(cp, checkpoint_ver);
363         DISP_u64(cp, user_block_count);
364         DISP_u64(cp, valid_block_count);
365         DISP_u32(cp, rsvd_segment_count);
366         DISP_u32(cp, overprov_segment_count);
367         DISP_u32(cp, free_segment_count);
368
369         DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
370         DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
371         DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
372         DISP_u32(cp, cur_node_segno[0]);
373         DISP_u32(cp, cur_node_segno[1]);
374         DISP_u32(cp, cur_node_segno[2]);
375
376         DISP_u32(cp, cur_node_blkoff[0]);
377         DISP_u32(cp, cur_node_blkoff[1]);
378         DISP_u32(cp, cur_node_blkoff[2]);
379
380
381         DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
382         DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
383         DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
384         DISP_u32(cp, cur_data_segno[0]);
385         DISP_u32(cp, cur_data_segno[1]);
386         DISP_u32(cp, cur_data_segno[2]);
387
388         DISP_u32(cp, cur_data_blkoff[0]);
389         DISP_u32(cp, cur_data_blkoff[1]);
390         DISP_u32(cp, cur_data_blkoff[2]);
391
392         DISP_u32(cp, ckpt_flags);
393         DISP_u32(cp, cp_pack_total_block_count);
394         DISP_u32(cp, cp_pack_start_sum);
395         DISP_u32(cp, valid_node_count);
396         DISP_u32(cp, valid_inode_count);
397         DISP_u32(cp, next_free_nid);
398         DISP_u32(cp, sit_ver_bitmap_bytesize);
399         DISP_u32(cp, nat_ver_bitmap_bytesize);
400         DISP_u32(cp, checksum_offset);
401         DISP_u64(cp, elapsed_time);
402
403         DISP_u32(cp, sit_nat_version_bitmap[0]);
404         printf("\n\n");
405 }
406
407 void print_cp_state(u32 flag)
408 {
409         MSG(0, "Info: checkpoint state = %x : ", flag);
410         if (flag & CP_QUOTA_NEED_FSCK_FLAG)
411                 MSG(0, "%s", " quota_need_fsck");
412         if (flag & CP_LARGE_NAT_BITMAP_FLAG)
413                 MSG(0, "%s", " large_nat_bitmap");
414         if (flag & CP_NOCRC_RECOVERY_FLAG)
415                 MSG(0, "%s", " allow_nocrc");
416         if (flag & CP_TRIMMED_FLAG)
417                 MSG(0, "%s", " trimmed");
418         if (flag & CP_NAT_BITS_FLAG)
419                 MSG(0, "%s", " nat_bits");
420         if (flag & CP_CRC_RECOVERY_FLAG)
421                 MSG(0, "%s", " crc");
422         if (flag & CP_FASTBOOT_FLAG)
423                 MSG(0, "%s", " fastboot");
424         if (flag & CP_FSCK_FLAG)
425                 MSG(0, "%s", " fsck");
426         if (flag & CP_ERROR_FLAG)
427                 MSG(0, "%s", " error");
428         if (flag & CP_COMPACT_SUM_FLAG)
429                 MSG(0, "%s", " compacted_summary");
430         if (flag & CP_ORPHAN_PRESENT_FLAG)
431                 MSG(0, "%s", " orphan_inodes");
432         if (flag & CP_DISABLED_FLAG)
433                 MSG(0, "%s", " disabled");
434         if (flag & CP_RESIZEFS_FLAG)
435                 MSG(0, "%s", " resizefs");
436         if (flag & CP_UMOUNT_FLAG)
437                 MSG(0, "%s", " unmount");
438         else
439                 MSG(0, "%s", " sudden-power-off");
440         MSG(0, "\n");
441 }
442
443 void print_sb_state(struct f2fs_super_block *sb)
444 {
445         __le32 f = sb->feature;
446         int i;
447
448         MSG(0, "Info: superblock features = %x : ", f);
449         if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
450                 MSG(0, "%s", " encrypt");
451         }
452         if (f & cpu_to_le32(F2FS_FEATURE_VERITY)) {
453                 MSG(0, "%s", " verity");
454         }
455         if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
456                 MSG(0, "%s", " blkzoned");
457         }
458         if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
459                 MSG(0, "%s", " extra_attr");
460         }
461         if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
462                 MSG(0, "%s", " project_quota");
463         }
464         if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
465                 MSG(0, "%s", " inode_checksum");
466         }
467         if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
468                 MSG(0, "%s", " flexible_inline_xattr");
469         }
470         if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
471                 MSG(0, "%s", " quota_ino");
472         }
473         if (f & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
474                 MSG(0, "%s", " inode_crtime");
475         }
476         if (f & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
477                 MSG(0, "%s", " lost_found");
478         }
479         if (f & cpu_to_le32(F2FS_FEATURE_SB_CHKSUM)) {
480                 MSG(0, "%s", " sb_checksum");
481         }
482         if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
483                 MSG(0, "%s", " casefold");
484         }
485         if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
486                 MSG(0, "%s", " compression");
487         }
488         MSG(0, "\n");
489         MSG(0, "Info: superblock encrypt level = %d, salt = ",
490                                         sb->encryption_level);
491         for (i = 0; i < 16; i++)
492                 MSG(0, "%02x", sb->encrypt_pw_salt[i]);
493         MSG(0, "\n");
494 }
495
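/*
 * A data block address is considered valid here only when it refers to a
 * real on-disk block: NULL_ADDR (unallocated), NEW_ADDR (reserved) and
 * COMPRESS_ADDR (compressed cluster marker) are all rejected.
 */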
496 static inline bool is_valid_data_blkaddr(block_t blkaddr)
497 {
498         if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
499                                 blkaddr == COMPRESS_ADDR)
500                 return 0;
501         return 1;
502 }
503
504 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
505                                         block_t blkaddr, int type)
506 {
507         switch (type) {
508         case META_NAT:
509                 break;
510         case META_SIT:
511                 if (blkaddr >= SIT_BLK_CNT(sbi))
512                         return 0;
513                 break;
514         case META_SSA:
515                 if (blkaddr >= MAIN_BLKADDR(sbi) ||
516                         blkaddr < SM_I(sbi)->ssa_blkaddr)
517                         return 0;
518                 break;
519         case META_CP:
520                 if (blkaddr >= SIT_I(sbi)->sit_base_addr ||
521                         blkaddr < __start_cp_addr(sbi))
522                         return 0;
523                 break;
524         case META_POR:
525                 if (blkaddr >= MAX_BLKADDR(sbi) ||
526                         blkaddr < MAIN_BLKADDR(sbi))
527                         return 0;
528                 break;
529         default:
530                 ASSERT(0);
531         }
532
533         return 1;
534 }
535
536 static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
537                                                 unsigned int start);
538
539 /*
540  * Readahead CP/NAT/SIT/SSA pages
541  */
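/*
 * Contiguous block addresses are coalesced into runs (start_blk, len),
 * and each run is submitted with a single dev_readahead() call.
 */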
542 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
543                                                         int type)
544 {
545         block_t blkno = start;
546         block_t blkaddr, start_blk = 0, len = 0;
547
548         for (; nrpages-- > 0; blkno++) {
549
550                 if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
551                         goto out;
552
553                 switch (type) {
554                 case META_NAT:
555                         if (blkno >= NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))
556                                 blkno = 0;
557                         /* get nat block addr */
558                         blkaddr = current_nat_addr(sbi,
559                                         blkno * NAT_ENTRY_PER_BLOCK, NULL);
560                         break;
561                 case META_SIT:
562                         /* get sit block addr */
563                         blkaddr = current_sit_addr(sbi,
564                                         blkno * SIT_ENTRY_PER_BLOCK);
565                         break;
566                 case META_SSA:
567                 case META_CP:
568                 case META_POR:
569                         blkaddr = blkno;
570                         break;
571                 default:
572                         ASSERT(0);
573                 }
574
575                 if (!len) {
576                         start_blk = blkaddr;
577                         len = 1;
578                 } else if (start_blk + len == blkaddr) {
579                         len++;
580                 } else {
581                         dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
582                                                 len << F2FS_BLKSIZE_BITS);
583                 }
584         }
585 out:
586         if (len)
587                 dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
588                                         len << F2FS_BLKSIZE_BITS);
589         return blkno - start;
590 }
591
592 void update_superblock(struct f2fs_super_block *sb, int sb_mask)
593 {
594         int addr, ret;
595         u_int8_t *buf;
596         u32 old_crc, new_crc;
597
598         buf = calloc(BLOCK_SZ, 1);
599         ASSERT(buf);
600
601         if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
602                 old_crc = get_sb(crc);
603                 new_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
604                                                 SB_CHKSUM_OFFSET);
605                 set_sb(crc, new_crc);
606                 MSG(1, "Info: SB CRC is updated (0x%x -> 0x%x)\n",
607                                                         old_crc, new_crc);
608         }
609
610         memcpy(buf + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
611         for (addr = SB0_ADDR; addr < SB_MAX_ADDR; addr++) {
612                 if (SB_MASK(addr) & sb_mask) {
613                         ret = dev_write_block(buf, addr);
614                         ASSERT(ret >= 0);
615                 }
616         }
617
618         free(buf);
619         DBG(0, "Info: Done updating the superblock\n");
620 }
621
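/*
 * Check that the metadata areas recorded in the superblock are laid out
 * back to back: segment0/CP, SIT, NAT, SSA, then MAIN, each spanning
 * (segment_count_xxx << log_blocks_per_seg) blocks.  A MAIN area that
 * ends before the last segment is tolerated; segment_count is trimmed
 * to match and the superblock is rewritten.
 */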
622 static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
623                                                         enum SB_ADDR sb_addr)
624 {
625         u32 segment0_blkaddr = get_sb(segment0_blkaddr);
626         u32 cp_blkaddr = get_sb(cp_blkaddr);
627         u32 sit_blkaddr = get_sb(sit_blkaddr);
628         u32 nat_blkaddr = get_sb(nat_blkaddr);
629         u32 ssa_blkaddr = get_sb(ssa_blkaddr);
630         u32 main_blkaddr = get_sb(main_blkaddr);
631         u32 segment_count_ckpt = get_sb(segment_count_ckpt);
632         u32 segment_count_sit = get_sb(segment_count_sit);
633         u32 segment_count_nat = get_sb(segment_count_nat);
634         u32 segment_count_ssa = get_sb(segment_count_ssa);
635         u32 segment_count_main = get_sb(segment_count_main);
636         u32 segment_count = get_sb(segment_count);
637         u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
638         u64 main_end_blkaddr = main_blkaddr +
639                                 (segment_count_main << log_blocks_per_seg);
640         u64 seg_end_blkaddr = segment0_blkaddr +
641                                 (segment_count << log_blocks_per_seg);
642
643         if (segment0_blkaddr != cp_blkaddr) {
644                 MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
645                                 segment0_blkaddr, cp_blkaddr);
646                 return -1;
647         }
648
649         if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
650                                                         sit_blkaddr) {
651                 MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
652                         cp_blkaddr, sit_blkaddr,
653                         segment_count_ckpt << log_blocks_per_seg);
654                 return -1;
655         }
656
657         if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
658                                                         nat_blkaddr) {
659                 MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
660                         sit_blkaddr, nat_blkaddr,
661                         segment_count_sit << log_blocks_per_seg);
662                 return -1;
663         }
664
665         if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
666                                                         ssa_blkaddr) {
667                 MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
668                         nat_blkaddr, ssa_blkaddr,
669                         segment_count_nat << log_blocks_per_seg);
670                 return -1;
671         }
672
673         if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
674                                                         main_blkaddr) {
675                 MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
676                         ssa_blkaddr, main_blkaddr,
677                         segment_count_ssa << log_blocks_per_seg);
678                 return -1;
679         }
680
681         if (main_end_blkaddr > seg_end_blkaddr) {
682                 MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
683                         main_blkaddr,
684                         segment0_blkaddr +
685                                 (segment_count << log_blocks_per_seg),
686                         segment_count_main << log_blocks_per_seg);
687                 return -1;
688         } else if (main_end_blkaddr < seg_end_blkaddr) {
689                 set_sb(segment_count, (main_end_blkaddr -
690                                 segment0_blkaddr) >> log_blocks_per_seg);
691
692                 update_superblock(sb, SB_MASK(sb_addr));
693                 MSG(0, "Info: Fix alignment: start(%u) end(%u) block(%u)\n",
694                         main_blkaddr,
695                         segment0_blkaddr +
696                                 (segment_count << log_blocks_per_seg),
697                         segment_count_main << log_blocks_per_seg);
698         }
699         return 0;
700 }
701
702 static int verify_sb_chksum(struct f2fs_super_block *sb)
703 {
704         if (SB_CHKSUM_OFFSET != get_sb(checksum_offset)) {
705                 MSG(0, "\tInvalid SB CRC offset: %u\n",
706                                         get_sb(checksum_offset));
707                 return -1;
708         }
709         if (f2fs_crc_valid(get_sb(crc), sb,
710                         get_sb(checksum_offset))) {
711                 MSG(0, "\tInvalid SB CRC: 0x%x\n", get_sb(crc));
712                 return -1;
713         }
714         return 0;
715 }
716
717 int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
718 {
719         unsigned int blocksize;
720         unsigned int segment_count, segs_per_sec, secs_per_zone;
721         unsigned int total_sections, blocks_per_seg;
722
723         if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) &&
724                                         verify_sb_chksum(sb))
725                 return -1;
726
727         if (F2FS_SUPER_MAGIC != get_sb(magic)) {
728                 MSG(0, "Magic Mismatch, valid(0x%x) - read(0x%x)\n",
729                         F2FS_SUPER_MAGIC, get_sb(magic));
730                 return -1;
731         }
732
733         if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
734                 MSG(0, "Invalid page_cache_size (%d), supports only 4KB\n",
735                         PAGE_CACHE_SIZE);
736                 return -1;
737         }
738
739         blocksize = 1 << get_sb(log_blocksize);
740         if (F2FS_BLKSIZE != blocksize) {
741                 MSG(0, "Invalid blocksize (%u), supports only 4KB\n",
742                         blocksize);
743                 return -1;
744         }
745
746         /* check log blocks per segment */
747         if (get_sb(log_blocks_per_seg) != 9) {
748                 MSG(0, "Invalid log blocks per segment (%u)\n",
749                         get_sb(log_blocks_per_seg));
750                 return -1;
751         }
752
753         /* Currently, support 512/1024/2048/4096 bytes sector size */
754         if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
755                         get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE) {
756                 MSG(0, "Invalid log sectorsize (%u)\n", get_sb(log_sectorsize));
757                 return -1;
758         }
759
760         if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
761                                                 F2FS_MAX_LOG_SECTOR_SIZE) {
762                 MSG(0, "Invalid log sectors per block(%u) log sectorsize(%u)\n",
763                         get_sb(log_sectors_per_block),
764                         get_sb(log_sectorsize));
765                 return -1;
766         }
767
768         segment_count = get_sb(segment_count);
769         segs_per_sec = get_sb(segs_per_sec);
770         secs_per_zone = get_sb(secs_per_zone);
771         total_sections = get_sb(section_count);
772
773         /* blocks_per_seg should be 512, given the above check */
774         blocks_per_seg = 1 << get_sb(log_blocks_per_seg);
775
776         if (segment_count > F2FS_MAX_SEGMENT ||
777                         segment_count < F2FS_MIN_SEGMENTS) {
778                 MSG(0, "\tInvalid segment count (%u)\n", segment_count);
779                 return -1;
780         }
781
782         if (total_sections > segment_count ||
783                         total_sections < F2FS_MIN_SEGMENTS ||
784                         segs_per_sec > segment_count || !segs_per_sec) {
785                 MSG(0, "\tInvalid segment/section count (%u, %u x %u)\n",
786                         segment_count, total_sections, segs_per_sec);
787                 return 1;
788         }
789
790         if ((segment_count / segs_per_sec) < total_sections) {
791                 MSG(0, "Small segment_count (%u < %u * %u)\n",
792                         segment_count, segs_per_sec, total_sections);
793                 return 1;
794         }
795
796         if (segment_count > (get_sb(block_count) >> 9)) {
797                 MSG(0, "Wrong segment_count / block_count (%u > %llu)\n",
798                         segment_count, get_sb(block_count));
799                 return 1;
800         }
801
802         if (sb->devs[0].path[0]) {
803                 unsigned int dev_segs = le32_to_cpu(sb->devs[0].total_segments);
804                 int i = 1;
805
806                 while (i < MAX_DEVICES && sb->devs[i].path[0]) {
807                         dev_segs += le32_to_cpu(sb->devs[i].total_segments);
808                         i++;
809                 }
810                 if (segment_count != dev_segs) {
811                         MSG(0, "Segment count (%u) mismatch with total segments from devices (%u)",
812                                 segment_count, dev_segs);
813                         return 1;
814                 }
815         }
816
817         if (secs_per_zone > total_sections || !secs_per_zone) {
818                 MSG(0, "Wrong secs_per_zone / total_sections (%u, %u)\n",
819                         secs_per_zone, total_sections);
820                 return 1;
821         }
822         if (get_sb(extension_count) > F2FS_MAX_EXTENSION ||
823                         sb->hot_ext_count > F2FS_MAX_EXTENSION ||
824                         get_sb(extension_count) +
825                         sb->hot_ext_count > F2FS_MAX_EXTENSION) {
826                 MSG(0, "Corrupted extension count (%u + %u > %u)\n",
827                         get_sb(extension_count),
828                         sb->hot_ext_count,
829                         F2FS_MAX_EXTENSION);
830                 return 1;
831         }
832
833         if (get_sb(cp_payload) > (blocks_per_seg - F2FS_CP_PACKS)) {
834                 MSG(0, "Insane cp_payload (%u > %u)\n",
835                         get_sb(cp_payload), blocks_per_seg - F2FS_CP_PACKS);
836                 return 1;
837         }
838
839         /* check reserved ino info */
840         if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
841                                                 get_sb(root_ino) != 3) {
842                 MSG(0, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)\n",
843                         get_sb(node_ino), get_sb(meta_ino), get_sb(root_ino));
844                 return -1;
845         }
846
847         /* Check zoned block device feature */
848         if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
849                         !(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
850                 MSG(0, "\tMissing zoned block device feature\n");
851                 return -1;
852         }
853
854         if (sanity_check_area_boundary(sb, sb_addr))
855                 return -1;
856         return 0;
857 }
858
859 int validate_super_block(struct f2fs_sb_info *sbi, enum SB_ADDR sb_addr)
860 {
861         char buf[F2FS_BLKSIZE];
862
863         sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
864         if (!sbi->raw_super)
865                 return -ENOMEM;
866
867         if (dev_read_block(buf, sb_addr))
868                 return -1;
869
870         memcpy(sbi->raw_super, buf + F2FS_SUPER_OFFSET,
871                                         sizeof(struct f2fs_super_block));
872
873         if (!sanity_check_raw_super(sbi->raw_super, sb_addr)) {
874                 /* get kernel version */
875                 if (c.kd >= 0) {
876                         dev_read_version(c.version, 0, VERSION_LEN);
877                         get_kernel_version(c.version);
878                 } else {
879                         get_kernel_uname_version(c.version);
880                 }
881
882                 /* build sb version */
883                 memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
884                 get_kernel_version(c.sb_version);
885                 memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
886                 get_kernel_version(c.init_version);
887
888                 MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
889                 MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
890                                         c.sb_version, c.version);
891                 if (!c.no_kernel_check &&
892                                 memcmp(c.sb_version, c.version, VERSION_LEN)) {
893                         memcpy(sbi->raw_super->version,
894                                                 c.version, VERSION_LEN);
895                         update_superblock(sbi->raw_super, SB_MASK(sb_addr));
896
897                         c.auto_fix = 0;
898                         c.fix_on = 1;
899                 }
900                 print_sb_state(sbi->raw_super);
901                 return 0;
902         }
903
904         free(sbi->raw_super);
905         sbi->raw_super = NULL;
906         MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", sb_addr);
907
908         return -EINVAL;
909 }
910
911 int init_sb_info(struct f2fs_sb_info *sbi)
912 {
913         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
914         u64 total_sectors;
915         int i;
916
917         sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
918         sbi->log_blocksize = get_sb(log_blocksize);
919         sbi->blocksize = 1 << sbi->log_blocksize;
920         sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
921         sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
922         sbi->segs_per_sec = get_sb(segs_per_sec);
923         sbi->secs_per_zone = get_sb(secs_per_zone);
924         sbi->total_sections = get_sb(section_count);
925         sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
926                                 sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
927         sbi->root_ino_num = get_sb(root_ino);
928         sbi->node_ino_num = get_sb(node_ino);
929         sbi->meta_ino_num = get_sb(meta_ino);
930         sbi->cur_victim_sec = NULL_SEGNO;
931
932         for (i = 0; i < MAX_DEVICES; i++) {
933                 if (!sb->devs[i].path[0])
934                         break;
935
936                 if (i) {
937                         c.devices[i].path = strdup((char *)sb->devs[i].path);
938                         if (get_device_info(i))
939                                 ASSERT(0);
940                 } else {
941                         ASSERT(!strcmp((char *)sb->devs[i].path,
942                                                 (char *)c.devices[i].path));
943                 }
944
945                 c.devices[i].total_segments =
946                         le32_to_cpu(sb->devs[i].total_segments);
947                 if (i)
948                         c.devices[i].start_blkaddr =
949                                 c.devices[i - 1].end_blkaddr + 1;
950                 c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
951                         c.devices[i].total_segments *
952                         c.blks_per_seg - 1;
953                 if (i == 0)
954                         c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);
955
956                 c.ndevs = i + 1;
957                 MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
958                                 i, c.devices[i].path,
959                                 c.devices[i].start_blkaddr,
960                                 c.devices[i].end_blkaddr);
961         }
962
963         total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
964         MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
965                                 total_sectors, total_sectors >>
966                                                 (20 - get_sb(log_sectorsize)));
967         return 0;
968 }
969
970 static int verify_checksum_chksum(struct f2fs_checkpoint *cp)
971 {
972         unsigned int chksum_offset = get_cp(checksum_offset);
973         unsigned int crc, cal_crc;
974
975         if (chksum_offset < CP_MIN_CHKSUM_OFFSET ||
976                         chksum_offset > CP_CHKSUM_OFFSET) {
977                 MSG(0, "\tInvalid CP CRC offset: %u\n", chksum_offset);
978                 return -1;
979         }
980
981         crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + chksum_offset));
982         cal_crc = f2fs_checkpoint_chksum(cp);
983         if (cal_crc != crc) {
984                 MSG(0, "\tInvalid CP CRC: offset:%u, crc:0x%x, calc:0x%x\n",
985                         chksum_offset, crc, cal_crc);
986                 return -1;
987         }
988         return 0;
989 }
990
991 static void *get_checkpoint_version(block_t cp_addr)
992 {
993         void *cp_page;
994
995         cp_page = malloc(PAGE_SIZE);
996         ASSERT(cp_page);
997
998         if (dev_read_block(cp_page, cp_addr) < 0)
999                 ASSERT(0);
1000
1001         if (verify_checksum_chksum((struct f2fs_checkpoint *)cp_page))
1002                 goto out;
1003         return cp_page;
1004 out:
1005         free(cp_page);
1006         return NULL;
1007 }
1008
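/*
 * A checkpoint pack is accepted only when its first and last blocks both
 * carry a valid CRC and the same checkpoint_ver; otherwise the pack is
 * treated as incomplete and NULL is returned.
 */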
1009 void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
1010                                 unsigned long long *version)
1011 {
1012         void *cp_page_1, *cp_page_2;
1013         struct f2fs_checkpoint *cp;
1014         unsigned long long cur_version = 0, pre_version = 0;
1015
1016         /* Read the 1st cp block in this CP pack */
1017         cp_page_1 = get_checkpoint_version(cp_addr);
1018         if (!cp_page_1)
1019                 return NULL;
1020
1021         cp = (struct f2fs_checkpoint *)cp_page_1;
1022         if (get_cp(cp_pack_total_block_count) > sbi->blocks_per_seg)
1023                 goto invalid_cp1;
1024
1025         pre_version = get_cp(checkpoint_ver);
1026
1027         /* Read the 2nd cp block in this CP pack */
1028         cp_addr += get_cp(cp_pack_total_block_count) - 1;
1029         cp_page_2 = get_checkpoint_version(cp_addr);
1030         if (!cp_page_2)
1031                 goto invalid_cp1;
1032
1033         cp = (struct f2fs_checkpoint *)cp_page_2;
1034         cur_version = get_cp(checkpoint_ver);
1035
1036         if (cur_version == pre_version) {
1037                 *version = cur_version;
1038                 free(cp_page_2);
1039                 return cp_page_1;
1040         }
1041
1042         free(cp_page_2);
1043 invalid_cp1:
1044         free(cp_page_1);
1045         return NULL;
1046 }
1047
1048 int get_valid_checkpoint(struct f2fs_sb_info *sbi)
1049 {
1050         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1051         void *cp1, *cp2, *cur_page;
1052         unsigned long blk_size = sbi->blocksize;
1053         unsigned long long cp1_version = 0, cp2_version = 0, version;
1054         unsigned long long cp_start_blk_no;
1055         unsigned int cp_payload, cp_blks;
1056         int ret;
1057
1058         cp_payload = get_sb(cp_payload);
1059         if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
1060                 return -EINVAL;
1061
1062         cp_blks = 1 + cp_payload;
1063         sbi->ckpt = malloc(cp_blks * blk_size);
1064         if (!sbi->ckpt)
1065                 return -ENOMEM;
1066         /*
1067          * Finding the valid cp block involves reading both
1068          * sets (cp pack 1 and cp pack 2).
1069          */
1070         cp_start_blk_no = get_sb(cp_blkaddr);
1071         cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
1072
1073         /* The second checkpoint pack should start at the next segment */
1074         cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
1075         cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
1076
1077         if (cp1 && cp2) {
1078                 if (ver_after(cp2_version, cp1_version)) {
1079                         cur_page = cp2;
1080                         sbi->cur_cp = 2;
1081                         version = cp2_version;
1082                 } else {
1083                         cur_page = cp1;
1084                         sbi->cur_cp = 1;
1085                         version = cp1_version;
1086                 }
1087         } else if (cp1) {
1088                 cur_page = cp1;
1089                 sbi->cur_cp = 1;
1090                 version = cp1_version;
1091         } else if (cp2) {
1092                 cur_page = cp2;
1093                 sbi->cur_cp = 2;
1094                 version = cp2_version;
1095         } else
1096                 goto fail_no_cp;
1097
1098         MSG(0, "Info: CKPT version = %llx\n", version);
1099
1100         memcpy(sbi->ckpt, cur_page, blk_size);
1101
1102         if (cp_blks > 1) {
1103                 unsigned int i;
1104                 unsigned long long cp_blk_no;
1105
1106                 cp_blk_no = get_sb(cp_blkaddr);
1107                 if (cur_page == cp2)
1108                         cp_blk_no += 1 << get_sb(log_blocks_per_seg);
1109
1110                 /* copy sit bitmap */
1111                 for (i = 1; i < cp_blks; i++) {
1112                         unsigned char *ckpt = (unsigned char *)sbi->ckpt;
1113                         ret = dev_read_block(cur_page, cp_blk_no + i);
1114                         ASSERT(ret >= 0);
1115                         memcpy(ckpt + i * blk_size, cur_page, blk_size);
1116                 }
1117         }
1118         if (cp1)
1119                 free(cp1);
1120         if (cp2)
1121                 free(cp2);
1122         return 0;
1123
1124 fail_no_cp:
1125         free(sbi->ckpt);
1126         sbi->ckpt = NULL;
1127         return -EINVAL;
1128 }
1129
1130 /*
1131  * For a return value of 1, the caller should further check the c.fix_on
1132  * state and take appropriate action.
1133  */
1134 static int f2fs_should_proceed(struct f2fs_super_block *sb, u32 flag)
1135 {
1136         if (!c.fix_on && (c.auto_fix || c.preen_mode)) {
1137                 if (flag & CP_FSCK_FLAG ||
1138                         flag & CP_QUOTA_NEED_FSCK_FLAG ||
1139                         (exist_qf_ino(sb) && (flag & CP_ERROR_FLAG))) {
1140                         c.fix_on = 1;
1141                 } else if (!c.preen_mode) {
1142                         print_cp_state(flag);
1143                         return 0;
1144                 }
1145         }
1146         return 1;
1147 }
1148
1149 int sanity_check_ckpt(struct f2fs_sb_info *sbi)
1150 {
1151         unsigned int total, fsmeta;
1152         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1153         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1154         unsigned int flag = get_cp(ckpt_flags);
1155         unsigned int ovp_segments, reserved_segments;
1156         unsigned int main_segs, blocks_per_seg;
1157         unsigned int sit_segs, nat_segs;
1158         unsigned int sit_bitmap_size, nat_bitmap_size;
1159         unsigned int log_blocks_per_seg;
1160         unsigned int segment_count_main;
1161         unsigned int cp_pack_start_sum, cp_payload;
1162         block_t user_block_count;
1163         int i;
1164
1165         total = get_sb(segment_count);
1166         fsmeta = get_sb(segment_count_ckpt);
1167         sit_segs = get_sb(segment_count_sit);
1168         fsmeta += sit_segs;
1169         nat_segs = get_sb(segment_count_nat);
1170         fsmeta += nat_segs;
1171         fsmeta += get_cp(rsvd_segment_count);
1172         fsmeta += get_sb(segment_count_ssa);
1173
1174         if (fsmeta >= total)
1175                 return 1;
1176
1177         ovp_segments = get_cp(overprov_segment_count);
1178         reserved_segments = get_cp(rsvd_segment_count);
1179
1180         if (fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 ||
1181                                         reserved_segments == 0) {
1182                 MSG(0, "\tWrong layout: check mkfs.f2fs version\n");
1183                 return 1;
1184         }
1185
1186         user_block_count = get_cp(user_block_count);
1187         segment_count_main = get_sb(segment_count_main);
1188         log_blocks_per_seg = get_sb(log_blocks_per_seg);
1189         if (!user_block_count || user_block_count >=
1190                         segment_count_main << log_blocks_per_seg) {
1191                 ASSERT_MSG("\tWrong user_block_count(%u)\n", user_block_count);
1192
1193                 if (!f2fs_should_proceed(sb, flag))
1194                         return 1;
1195                 if (!c.fix_on)
1196                         return 1;
1197
1198                 if (flag & (CP_FSCK_FLAG | CP_RESIZEFS_FLAG)) {
1199                         u32 valid_user_block_cnt;
1200                         u32 seg_cnt_main = get_sb(segment_count) -
1201                                         (get_sb(segment_count_ckpt) +
1202                                          get_sb(segment_count_sit) +
1203                                          get_sb(segment_count_nat) +
1204                                          get_sb(segment_count_ssa));
1205
1206                         /* validate segment_count_main in sb first */
1207                         if (seg_cnt_main != get_sb(segment_count_main)) {
1208                                 MSG(0, "Inconsistent segment_cnt_main %u in sb\n",
1209                                                 segment_count_main);
1210                                 return 1;
1211                         }
1212                         valid_user_block_cnt = ((get_sb(segment_count_main) -
1213                                                 get_cp(overprov_segment_count)) * c.blks_per_seg);
1214                         MSG(0, "Info: Fix wrong user_block_count in CP: (%u) -> (%u)\n",
1215                                         user_block_count, valid_user_block_cnt);
1216                         set_cp(user_block_count, valid_user_block_cnt);
1217                         c.bug_on = 1;
1218                 }
1219         }
1220
1221         main_segs = get_sb(segment_count_main);
1222         blocks_per_seg = sbi->blocks_per_seg;
1223
1224         for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
1225                 if (get_cp(cur_node_segno[i]) >= main_segs ||
1226                         get_cp(cur_node_blkoff[i]) >= blocks_per_seg)
1227                         return 1;
1228         }
1229         for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
1230                 if (get_cp(cur_data_segno[i]) >= main_segs ||
1231                         get_cp(cur_data_blkoff[i]) >= blocks_per_seg)
1232                         return 1;
1233         }
1234
1235         sit_bitmap_size = get_cp(sit_ver_bitmap_bytesize);
1236         nat_bitmap_size = get_cp(nat_ver_bitmap_bytesize);
1237
1238         if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
1239                 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
1240                 MSG(0, "\tWrong bitmap size: sit(%u), nat(%u)\n",
1241                                 sit_bitmap_size, nat_bitmap_size);
1242                 return 1;
1243         }
1244
1245         cp_pack_start_sum = __start_sum_addr(sbi);
1246         cp_payload = __cp_payload(sbi);
1247         if (cp_pack_start_sum < cp_payload + 1 ||
1248                 cp_pack_start_sum > blocks_per_seg - 1 -
1249                         NR_CURSEG_TYPE) {
1250                 MSG(0, "\tWrong cp_pack_start_sum(%u) or cp_payload(%u)\n",
1251                         cp_pack_start_sum, cp_payload);
1252                 if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM))
1253                         return 1;
1254                 set_sb(cp_payload, cp_pack_start_sum - 1);
1255                 update_superblock(sb, SB_MASK_ALL);
1256         }
1257
1258         return 0;
1259 }
1260
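/*
 * Each NAT block has two on-disk copies stored in adjacent segments;
 * the corresponding bit in nm_i->nat_bitmap selects which copy (pack #1
 * or pack #2) holds the current data.  Returns the block address of the
 * NAT block that covers nid 'start'.
 */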
1261 pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start, int *pack)
1262 {
1263         struct f2fs_nm_info *nm_i = NM_I(sbi);
1264         pgoff_t block_off;
1265         pgoff_t block_addr;
1266         int seg_off;
1267
1268         block_off = NAT_BLOCK_OFFSET(start);
1269         seg_off = block_off >> sbi->log_blocks_per_seg;
1270
1271         block_addr = (pgoff_t)(nm_i->nat_blkaddr +
1272                         (seg_off << sbi->log_blocks_per_seg << 1) +
1273                         (block_off & ((1 << sbi->log_blocks_per_seg) -1)));
1274         if (pack)
1275                 *pack = 1;
1276
1277         if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
1278                 block_addr += sbi->blocks_per_seg;
1279                 if (pack)
1280                         *pack = 2;
1281         }
1282
1283         return block_addr;
1284 }
1285
1286 static int f2fs_init_nid_bitmap(struct f2fs_sb_info *sbi)
1287 {
1288         struct f2fs_nm_info *nm_i = NM_I(sbi);
1289         int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
1290         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1291         struct f2fs_summary_block *sum = curseg->sum_blk;
1292         struct f2fs_journal *journal = &sum->journal;
1293         struct f2fs_nat_block *nat_block;
1294         block_t start_blk;
1295         nid_t nid;
1296         int i;
1297
1298         if (!(c.func == SLOAD || c.func == FSCK))
1299                 return 0;
1300
1301         nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
1302         if (!nm_i->nid_bitmap)
1303                 return -ENOMEM;
1304
1305         /* arbitrarily set 0 bit */
1306         f2fs_set_bit(0, nm_i->nid_bitmap);
1307
1308         nat_block = malloc(F2FS_BLKSIZE);
1309         if (!nat_block) {
1310                 free(nm_i->nid_bitmap);
1311                 return -ENOMEM;
1312         }
1313
1314         f2fs_ra_meta_pages(sbi, 0, NAT_BLOCK_OFFSET(nm_i->max_nid),
1315                                                         META_NAT);
1316
1317         for (nid = 0; nid < nm_i->max_nid; nid++) {
1318                 if (!(nid % NAT_ENTRY_PER_BLOCK)) {
1319                         int ret;
1320
1321                         start_blk = current_nat_addr(sbi, nid, NULL);
1322                         ret = dev_read_block(nat_block, start_blk);
1323                         ASSERT(ret >= 0);
1324                 }
1325
1326                 if (nat_block->entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
1327                         f2fs_set_bit(nid, nm_i->nid_bitmap);
1328         }
1329
1330         if (nats_in_cursum(journal) > NAT_JOURNAL_ENTRIES) {
1331                 MSG(0, "\tError: f2fs_init_nid_bitmap truncate n_nats(%u) to "
1332                         "NAT_JOURNAL_ENTRIES(%lu)\n",
1333                         nats_in_cursum(journal), NAT_JOURNAL_ENTRIES);
1334                 journal->n_nats = cpu_to_le16(NAT_JOURNAL_ENTRIES);
1335                 c.fix_on = 1;
1336         }
1337
1338         for (i = 0; i < nats_in_cursum(journal); i++) {
1339                 block_t addr;
1340
1341                 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
1342                 if (!IS_VALID_BLK_ADDR(sbi, addr)) {
1343                         MSG(0, "\tError: f2fs_init_nid_bitmap: addr(%u) is invalid!!!\n", addr);
1344                         journal->n_nats = cpu_to_le16(i);
1345                         c.fix_on = 1;
1346                         continue;
1347                 }
1348
1349                 nid = le32_to_cpu(nid_in_journal(journal, i));
1350                 if (!IS_VALID_NID(sbi, nid)) {
1351                         MSG(0, "\tError: f2fs_init_nid_bitmap: nid(%u) is invalid!!!\n", nid);
1352                         journal->n_nats = cpu_to_le16(i);
1353                         c.fix_on = 1;
1354                         continue;
1355                 }
1356                 if (addr != NULL_ADDR)
1357                         f2fs_set_bit(nid, nm_i->nid_bitmap);
1358         }
1359         free(nat_block);
1360         return 0;
1361 }
1362
1363 u32 update_nat_bits_flags(struct f2fs_super_block *sb,
1364                                 struct f2fs_checkpoint *cp, u32 flags)
1365 {
1366         u_int32_t nat_bits_bytes, nat_bits_blocks;
1367
1368         nat_bits_bytes = get_sb(segment_count_nat) << 5;
1369         nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
1370                                                 F2FS_BLKSIZE - 1);
1371         if (get_cp(cp_pack_total_block_count) <=
1372                         (1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
1373                 flags |= CP_NAT_BITS_FLAG;
1374         else
1375                 flags &= (~CP_NAT_BITS_FLAG);
1376
1377         return flags;
1378 }
1379
1380 /* should call flush_journal_entries() before this */
1381 void write_nat_bits(struct f2fs_sb_info *sbi,
1382         struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
1383 {
1384         struct f2fs_nm_info *nm_i = NM_I(sbi);
1385         u_int32_t nat_blocks = get_sb(segment_count_nat) <<
1386                                 (get_sb(log_blocks_per_seg) - 1);
1387         u_int32_t nat_bits_bytes = nat_blocks >> 3;
1388         u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
1389                                         8 + F2FS_BLKSIZE - 1);
1390         unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
1391         struct f2fs_nat_block *nat_block;
1392         u_int32_t i, j;
1393         block_t blkaddr;
1394         int ret;
1395
1396         nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
1397         ASSERT(nat_bits);
1398
1399         nat_block = malloc(F2FS_BLKSIZE);
1400         ASSERT(nat_block);
1401
1402         full_nat_bits = nat_bits + 8;
1403         empty_nat_bits = full_nat_bits + nat_bits_bytes;
1404
1405         memset(full_nat_bits, 0, nat_bits_bytes);
1406         memset(empty_nat_bits, 0, nat_bits_bytes);
1407
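	/*
	 * Scan every NAT block: a block with no valid entries sets its bit in
	 * empty_nat_bits, a completely full block sets its bit in
	 * full_nat_bits; nid 0 (i == 0 && j == 0) always counts as valid.
	 */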
1408         for (i = 0; i < nat_blocks; i++) {
1409                 int seg_off = i >> get_sb(log_blocks_per_seg);
1410                 int valid = 0;
1411
1412                 blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
1413                                 (seg_off << get_sb(log_blocks_per_seg) << 1) +
1414                                 (i & ((1 << get_sb(log_blocks_per_seg)) - 1)));
1415
1416                 /*
1417                  * The new nat_blocks count may be larger than the old
1418                  * nm_i->nat_blocks, since nm_i->nat_bitmap was built from
1419                  * the old layout; only test bits within the old range.
1420                  */
1421                 if (i < nm_i->nat_blocks && f2fs_test_bit(i, nm_i->nat_bitmap))
1422                         blkaddr += (1 << get_sb(log_blocks_per_seg));
1423
1424                 ret = dev_read_block(nat_block, blkaddr);
1425                 ASSERT(ret >= 0);
1426
1427                 for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
1428                         if ((i == 0 && j == 0) ||
1429                                 nat_block->entries[j].block_addr != NULL_ADDR)
1430                                 valid++;
1431                 }
1432                 if (valid == 0)
1433                         test_and_set_bit_le(i, empty_nat_bits);
1434                 else if (valid == NAT_ENTRY_PER_BLOCK)
1435                         test_and_set_bit_le(i, full_nat_bits);
1436         }
1437         *(__le64 *)nat_bits = get_cp_crc(cp);
1438         free(nat_block);
1439
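	/*
	 * The nat_bits blocks are stored at the very end of the selected
	 * checkpoint segment ('set' picks cp pack #1 or #2), right before
	 * the next segment boundary.
	 */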
1440         blkaddr = get_sb(segment0_blkaddr) + (set <<
1441                                 get_sb(log_blocks_per_seg)) - nat_bits_blocks;
1442
1443         DBG(1, "\tWriting NAT bits pages at offset 0x%08x\n", blkaddr);
1444
1445         for (i = 0; i < nat_bits_blocks; i++) {
1446                 if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
1447                         ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
1448         }
1449         MSG(0, "Info: Write valid nat_bits in checkpoint\n");
1450
1451         free(nat_bits);
1452 }
1453
1454 static int check_nat_bits(struct f2fs_sb_info *sbi,
1455         struct f2fs_super_block *sb, struct f2fs_checkpoint *cp)
1456 {
1457         struct f2fs_nm_info *nm_i = NM_I(sbi);
1458         u_int32_t nat_blocks = get_sb(segment_count_nat) <<
1459                                 (get_sb(log_blocks_per_seg) - 1);
1460         u_int32_t nat_bits_bytes = nat_blocks >> 3;
1461         u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
1462                                         8 + F2FS_BLKSIZE - 1);
1463         unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
1464         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1465         struct f2fs_journal *journal = &curseg->sum_blk->journal;
1466         u_int32_t i, j;
1467         block_t blkaddr;
1468         int err = 0;
1469
1470         nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
1471         ASSERT(nat_bits);
1472
1473         full_nat_bits = nat_bits + 8;
1474         empty_nat_bits = full_nat_bits + nat_bits_bytes;
1475
1476         blkaddr = get_sb(segment0_blkaddr) + (sbi->cur_cp <<
1477                                 get_sb(log_blocks_per_seg)) - nat_bits_blocks;
1478
1479         for (i = 0; i < nat_bits_blocks; i++) {
1480                 if (dev_read_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
1481                         ASSERT_MSG("\tError: read NAT bits from disk!!!\n");
1482         }
1483
1484         if (*(__le64 *)nat_bits != get_cp_crc(cp) || nats_in_cursum(journal)) {
1485                 /*
1486                  * If the CRC mismatches or NAT journal entries remain, f2fs was
1487                  * not shut down cleanly; when fixing, flush them with nat_bits.
1488                  */
1489                 if (c.fix_on)
1490                         err = -1;
1491                 /* Otherwise, kernel will disable nat_bits */
1492                 goto out;
1493         }
1494
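	/*
	 * Verify nat_bits against the freshly built nid bitmap: for each NAT
	 * block the number of in-use nids must agree with the empty/full
	 * bits (0 valid -> empty, NAT_ENTRY_PER_BLOCK valid -> full,
	 * otherwise neither bit may be set).
	 */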
1495         for (i = 0; i < nat_blocks; i++) {
1496                 u_int32_t start_nid = i * NAT_ENTRY_PER_BLOCK;
1497                 u_int32_t valid = 0;
1498                 int empty = test_bit_le(i, empty_nat_bits);
1499                 int full = test_bit_le(i, full_nat_bits);
1500
1501                 for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
1502                         if (f2fs_test_bit(start_nid + j, nm_i->nid_bitmap))
1503                                 valid++;
1504                 }
1505                 if (valid == 0) {
1506                         if (!empty || full) {
1507                                 err = -1;
1508                                 goto out;
1509                         }
1510                 } else if (valid == NAT_ENTRY_PER_BLOCK) {
1511                         if (empty || !full) {
1512                                 err = -1;
1513                                 goto out;
1514                         }
1515                 } else {
1516                         if (empty || full) {
1517                                 err = -1;
1518                                 goto out;
1519                         }
1520                 }
1521         }
1522 out:
1523         free(nat_bits);
1524         if (!err) {
1525                 MSG(0, "Info: Checked valid nat_bits in checkpoint\n");
1526         } else {
1527                 c.bug_nat_bits = 1;
1528                 MSG(0, "Info: Corrupted valid nat_bits in checkpoint\n");
1529         }
1530         return err;
1531 }
1532
1533 int init_node_manager(struct f2fs_sb_info *sbi)
1534 {
1535         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1536         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1537         struct f2fs_nm_info *nm_i = NM_I(sbi);
1538         unsigned char *version_bitmap;
1539         unsigned int nat_segs;
1540
1541         nm_i->nat_blkaddr = get_sb(nat_blkaddr);
1542
1543         /* segment_count_nat includes pair segment so divide by 2. */
1544         nat_segs = get_sb(segment_count_nat) >> 1;
1545         nm_i->nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
1546         nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
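	/*
	 * Example (assuming the default 4KB block and 512-block segments):
	 * 9-byte NAT entries give NAT_ENTRY_PER_BLOCK == 455, so a filesystem
	 * with segment_count_nat == 4 has nat_blocks == 2 * 512 == 1024 and
	 * max_nid == 455 * 1024.
	 */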
1547         nm_i->fcnt = 0;
1548         nm_i->nat_cnt = 0;
1549         nm_i->init_scan_nid = get_cp(next_free_nid);
1550         nm_i->next_scan_nid = get_cp(next_free_nid);
1551
1552         nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1553
1554         nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
1555         if (!nm_i->nat_bitmap)
1556                 return -ENOMEM;
1557         version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1558         if (!version_bitmap)
1559                 return -EFAULT;
1560
1561         /* copy version bitmap */
1562         memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
1563         return f2fs_init_nid_bitmap(sbi);
1564 }
1565
1566 int build_node_manager(struct f2fs_sb_info *sbi)
1567 {
1568         int err;
1569         sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
1570         if (!sbi->nm_info)
1571                 return -ENOMEM;
1572
1573         err = init_node_manager(sbi);
1574         if (err)
1575                 return err;
1576
1577         return 0;
1578 }
1579
1580 int build_sit_info(struct f2fs_sb_info *sbi)
1581 {
1582         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
1583         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1584         struct sit_info *sit_i;
1585         unsigned int sit_segs;
1586         int start;
1587         char *src_bitmap, *dst_bitmap;
1588         unsigned char *bitmap;
1589         unsigned int bitmap_size;
1590
1591         sit_i = malloc(sizeof(struct sit_info));
1592         if (!sit_i) {
1593                 MSG(1, "\tError: Malloc failed for build_sit_info!\n");
1594                 return -ENOMEM;
1595         }
1596
1597         SM_I(sbi)->sit_info = sit_i;
1598
1599         sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
1600         if (!sit_i->sentries) {
1601                 MSG(1, "\tError: Calloc failed for build_sit_info!\n");
1602                 goto free_sit_info;
1603         }
1604
1605         bitmap_size = TOTAL_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE;
1606
1607         if (need_fsync_data_record(sbi))
1608                 bitmap_size += bitmap_size;
1609
1610         sit_i->bitmap = calloc(bitmap_size, 1);
1611         if (!sit_i->bitmap) {
1612                 MSG(1, "\tError: Calloc failed for build_sit_info!!\n");
1613                 goto free_sentries;
1614         }
1615
1616         bitmap = sit_i->bitmap;
1617
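	/*
	 * Carve the single allocation into per-segment slices: each seg_entry
	 * gets one SIT_VBLOCK_MAP_SIZE map for cur_valid_map and, when fsync
	 * data records must be replayed, a second one for ckpt_valid_map.
	 */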
1618         for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1619                 sit_i->sentries[start].cur_valid_map = bitmap;
1620                 bitmap += SIT_VBLOCK_MAP_SIZE;
1621
1622                 if (need_fsync_data_record(sbi)) {
1623                         sit_i->sentries[start].ckpt_valid_map = bitmap;
1624                         bitmap += SIT_VBLOCK_MAP_SIZE;
1625                 }
1626         }
1627
1628         sit_segs = get_sb(segment_count_sit) >> 1;
1629         bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1630         src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1631
1632         dst_bitmap = malloc(bitmap_size);
1633         if (!dst_bitmap) {
1634                 MSG(1, "\tError: Malloc failed for build_sit_info!!\n");
1635                 goto free_validity_maps;
1636         }
1637
1638         memcpy(dst_bitmap, src_bitmap, bitmap_size);
1639
1640         sit_i->sit_base_addr = get_sb(sit_blkaddr);
1641         sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1642         sit_i->written_valid_blocks = get_cp(valid_block_count);
1643         sit_i->sit_bitmap = dst_bitmap;
1644         sit_i->bitmap_size = bitmap_size;
1645         sit_i->dirty_sentries = 0;
1646         sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1647         sit_i->elapsed_time = get_cp(elapsed_time);
1648         return 0;
1649
1650 free_validity_maps:
1651         free(sit_i->bitmap);
1652 free_sentries:
1653         free(sit_i->sentries);
1654 free_sit_info:
1655         free(sit_i);
1656
1657         return -ENOMEM;
1658 }
1659
1660 void reset_curseg(struct f2fs_sb_info *sbi, int type)
1661 {
1662         struct curseg_info *curseg = CURSEG_I(sbi, type);
1663         struct summary_footer *sum_footer;
1664         struct seg_entry *se;
1665
1666         sum_footer = &(curseg->sum_blk->footer);
1667         memset(sum_footer, 0, sizeof(struct summary_footer));
1668         if (IS_DATASEG(type))
1669                 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1670         if (IS_NODESEG(type))
1671                 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1672         se = get_seg_entry(sbi, curseg->segno);
1673         se->type = type;
1674         se->dirty = 1;
1675 }
1676
1677 static void read_compacted_summaries(struct f2fs_sb_info *sbi)
1678 {
1679         struct curseg_info *curseg;
1680         unsigned int i, j, offset;
1681         block_t start;
1682         char *kaddr;
1683         int ret;
1684
1685         start = start_sum_block(sbi);
1686
1687         kaddr = (char *)malloc(PAGE_SIZE);
1688         ASSERT(kaddr);
1689
1690         ret = dev_read_block(kaddr, start++);
1691         ASSERT(ret >= 0);
1692
1693         curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1694         memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
1695
1696         curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1697         memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
1698                                                 SUM_JOURNAL_SIZE);
1699
1700         offset = 2 * SUM_JOURNAL_SIZE;
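	/*
	 * A compacted summary block packs the NAT journal, the SIT journal
	 * and then the raw summary entries of the three data cursegs back to
	 * back, spilling into following blocks as needed; the footer area of
	 * each block is left unused.
	 */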
1701         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1702                 unsigned short blk_off;
1703                 struct curseg_info *curseg = CURSEG_I(sbi, i);
1704
1705                 reset_curseg(sbi, i);
1706
1707                 if (curseg->alloc_type == SSR)
1708                         blk_off = sbi->blocks_per_seg;
1709                 else
1710                         blk_off = curseg->next_blkoff;
1711
1712                 ASSERT(blk_off <= ENTRIES_IN_SUM);
1713
1714                 for (j = 0; j < blk_off; j++) {
1715                         struct f2fs_summary *s;
1716                         s = (struct f2fs_summary *)(kaddr + offset);
1717                         curseg->sum_blk->entries[j] = *s;
1718                         offset += SUMMARY_SIZE;
1719                         if (offset + SUMMARY_SIZE <=
1720                                         PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
1721                                 continue;
1722                         memset(kaddr, 0, PAGE_SIZE);
1723                         ret = dev_read_block(kaddr, start++);
1724                         ASSERT(ret >= 0);
1725                         offset = 0;
1726                 }
1727         }
1728         free(kaddr);
1729 }
1730
1731 static void restore_node_summary(struct f2fs_sb_info *sbi,
1732                 unsigned int segno, struct f2fs_summary_block *sum_blk)
1733 {
1734         struct f2fs_node *node_blk;
1735         struct f2fs_summary *sum_entry;
1736         block_t addr;
1737         unsigned int i;
1738         int ret;
1739
1740         node_blk = malloc(F2FS_BLKSIZE);
1741         ASSERT(node_blk);
1742
1743         /* scan the node segment */
1744         addr = START_BLOCK(sbi, segno);
1745         sum_entry = &sum_blk->entries[0];
1746
1747         for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
1748                 ret = dev_read_block(node_blk, addr);
1749                 ASSERT(ret >= 0);
1750                 sum_entry->nid = node_blk->footer.nid;
1751                 addr++;
1752         }
1753         free(node_blk);
1754 }
1755
1756 static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1757 {
1758         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1759         struct f2fs_summary_block *sum_blk;
1760         struct curseg_info *curseg;
1761         unsigned int segno = 0;
1762         block_t blk_addr = 0;
1763         int ret;
1764
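	/*
	 * Summary location depends on CP_UMOUNT_FLAG: after a clean umount
	 * every curseg summary is stored in the cp pack; otherwise only the
	 * data summaries are there, while node summaries are read from SSA
	 * and their nids rebuilt by scanning the node segment itself.
	 */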
1765         if (IS_DATASEG(type)) {
1766                 segno = get_cp(cur_data_segno[type]);
1767                 if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1768                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1769                 else
1770                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1771         } else {
1772                 segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
1773                 if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1774                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1775                                                         type - CURSEG_HOT_NODE);
1776                 else
1777                         blk_addr = GET_SUM_BLKADDR(sbi, segno);
1778         }
1779
1780         sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
1781         ASSERT(sum_blk);
1782
1783         ret = dev_read_block(sum_blk, blk_addr);
1784         ASSERT(ret >= 0);
1785
1786         if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1787                 restore_node_summary(sbi, segno, sum_blk);
1788
1789         curseg = CURSEG_I(sbi, type);
1790         memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
1791         reset_curseg(sbi, type);
1792         free(sum_blk);
1793 }
1794
1795 void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
1796                                         struct f2fs_summary *sum)
1797 {
1798         struct f2fs_summary_block *sum_blk;
1799         u32 segno, offset;
1800         int type, ret;
1801         struct seg_entry *se;
1802
1803         segno = GET_SEGNO(sbi, blk_addr);
1804         offset = OFFSET_IN_SEG(sbi, blk_addr);
1805
1806         se = get_seg_entry(sbi, segno);
1807
1808         sum_blk = get_sum_block(sbi, segno, &type);
1809         memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
1810         sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
1811                                                         SUM_TYPE_DATA;
1812
1813         /* write SSA all the time */
1814         ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
1815         ASSERT(ret >= 0);
1816
1817         if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
1818                                         type == SEG_TYPE_MAX)
1819                 free(sum_blk);
1820 }
1821
1822 static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
1823 {
1824         int type = CURSEG_HOT_DATA;
1825
1826         if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1827                 read_compacted_summaries(sbi);
1828                 type = CURSEG_HOT_NODE;
1829         }
1830
1831         for (; type <= CURSEG_COLD_NODE; type++)
1832                 read_normal_summaries(sbi, type);
1833 }
1834
1835 static int build_curseg(struct f2fs_sb_info *sbi)
1836 {
1837         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1838         struct curseg_info *array;
1839         unsigned short blk_off;
1840         unsigned int segno;
1841         int i;
1842
1843         array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
1844         if (!array) {
1845                 MSG(1, "\tError: Malloc failed for build_curseg!\n");
1846                 return -ENOMEM;
1847         }
1848
1849         SM_I(sbi)->curseg_array = array;
1850
1851         for (i = 0; i < NR_CURSEG_TYPE; i++) {
1852                 array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
1853                 if (!array[i].sum_blk) {
1854                         MSG(1, "\tError: Malloc failed for build_curseg!!\n");
1855                         goto seg_cleanup;
1856                 }
1857
1858                 if (i <= CURSEG_COLD_DATA) {
1859                         blk_off = get_cp(cur_data_blkoff[i]);
1860                         segno = get_cp(cur_data_segno[i]);
1861                 }
1862                 if (i > CURSEG_COLD_DATA) {
1863                         blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
1864                         segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
1865                 }
1866                 ASSERT(segno < TOTAL_SEGS(sbi));
1867                 ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);
1868
1869                 array[i].segno = segno;
1870                 array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
1871                 array[i].next_segno = NULL_SEGNO;
1872                 array[i].next_blkoff = blk_off;
1873                 array[i].alloc_type = cp->alloc_type[i];
1874         }
1875         restore_curseg_summaries(sbi);
1876         return 0;
1877
1878 seg_cleanup:
1879         for (--i; i >= 0; --i)
1880                 free(array[i].sum_blk);
1881         free(array);
1882
1883         return -ENOMEM;
1884 }
1885
1886 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
1887 {
1888         unsigned int end_segno = SM_I(sbi)->segment_count - 1;
1889         ASSERT(segno <= end_segno);
1890 }
1891
1892 static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
1893                                                 unsigned int segno)
1894 {
1895         struct sit_info *sit_i = SIT_I(sbi);
1896         unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1897         block_t blk_addr = sit_i->sit_base_addr + offset;
1898
1899         check_seg_range(sbi, segno);
1900
1901         /* calculate sit block address */
1902         if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1903                 blk_addr += sit_i->sit_blocks;
1904
1905         return blk_addr;
1906 }
1907
1908 void get_current_sit_page(struct f2fs_sb_info *sbi,
1909                         unsigned int segno, struct f2fs_sit_block *sit_blk)
1910 {
1911         block_t blk_addr = current_sit_addr(sbi, segno);
1912
1913         ASSERT(dev_read_block(sit_blk, blk_addr) >= 0);
1914 }
1915
1916 void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
1917                         unsigned int segno, struct f2fs_sit_block *sit_blk)
1918 {
1919         block_t blk_addr = current_sit_addr(sbi, segno);
1920
1921         ASSERT(dev_write_block(sit_blk, blk_addr) >= 0);
1922 }
1923
1924 void check_block_count(struct f2fs_sb_info *sbi,
1925                 unsigned int segno, struct f2fs_sit_entry *raw_sit)
1926 {
1927         struct f2fs_sm_info *sm_info = SM_I(sbi);
1928         unsigned int end_segno = sm_info->segment_count - 1;
1929         int valid_blocks = 0;
1930         unsigned int i;
1931
1932         /* check segment usage */
1933         if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
1934                 ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
1935                                 segno, GET_SIT_VBLOCKS(raw_sit));
1936
1937         /* check boundary of a given segment number */
1938         if (segno > end_segno)
1939                 ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
1940
1941         /* check bitmap with valid block count */
1942         for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1943                 valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
1944
1945         if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
1946                 ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
1947                                 segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
1948
1949         if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
1950                 ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
1951                                 segno, GET_SIT_TYPE(raw_sit));
1952 }
1953
1954 void __seg_info_from_raw_sit(struct seg_entry *se,
1955                 struct f2fs_sit_entry *raw_sit)
1956 {
1957         se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1958         memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1959         se->type = GET_SIT_TYPE(raw_sit);
1960         se->orig_type = GET_SIT_TYPE(raw_sit);
1961         se->mtime = le64_to_cpu(raw_sit->mtime);
1962 }
1963
1964 void seg_info_from_raw_sit(struct f2fs_sb_info *sbi, struct seg_entry *se,
1965                                                 struct f2fs_sit_entry *raw_sit)
1966 {
1967         __seg_info_from_raw_sit(se, raw_sit);
1968
1969         if (!need_fsync_data_record(sbi))
1970                 return;
1971         se->ckpt_valid_blocks = se->valid_blocks;
1972         memcpy(se->ckpt_valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1973         se->ckpt_type = se->type;
1974 }
1975
1976 struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
1977                 unsigned int segno)
1978 {
1979         struct sit_info *sit_i = SIT_I(sbi);
1980         return &sit_i->sentries[segno];
1981 }
1982
1983 unsigned short get_seg_vblocks(struct f2fs_sb_info *sbi, struct seg_entry *se)
1984 {
1985         if (!need_fsync_data_record(sbi))
1986                 return se->valid_blocks;
1987         else
1988                 return se->ckpt_valid_blocks;
1989 }
1990
1991 unsigned char *get_seg_bitmap(struct f2fs_sb_info *sbi, struct seg_entry *se)
1992 {
1993         if (!need_fsync_data_record(sbi))
1994                 return se->cur_valid_map;
1995         else
1996                 return se->ckpt_valid_map;
1997 }
1998
1999 unsigned char get_seg_type(struct f2fs_sb_info *sbi, struct seg_entry *se)
2000 {
2001         if (!need_fsync_data_record(sbi))
2002                 return se->type;
2003         else
2004                 return se->ckpt_type;
2005 }
2006
2007 struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
2008                                 unsigned int segno, int *ret_type)
2009 {
2010         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2011         struct f2fs_summary_block *sum_blk;
2012         struct curseg_info *curseg;
2013         int type, ret;
2014         u64 ssa_blk;
2015
2016         *ret_type = SEG_TYPE_MAX;
2017
2018         ssa_blk = GET_SUM_BLKADDR(sbi, segno);
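	/*
	 * If segno is one of the six current segments, return the in-memory
	 * curseg summary (flagging a node/data type mismatch with a negative
	 * type).  Otherwise read the SSA block from disk; callers free the
	 * buffer only for the SEG_TYPE_NODE/DATA/MAX cases.
	 */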
2019         for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
2020                 if (segno == get_cp(cur_node_segno[type])) {
2021                         curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
2022                         if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
2023                                 ASSERT_MSG("segno [0x%x] indicates a data "
2024                                                 "segment, but should be node",
2025                                                 segno);
2026                                 *ret_type = -SEG_TYPE_CUR_NODE;
2027                         } else {
2028                                 *ret_type = SEG_TYPE_CUR_NODE;
2029                         }
2030                         return curseg->sum_blk;
2031                 }
2032         }
2033
2034         for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
2035                 if (segno == get_cp(cur_data_segno[type])) {
2036                         curseg = CURSEG_I(sbi, type);
2037                         if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
2038                                 ASSERT_MSG("segno [0x%x] indicates a node "
2039                                                 "segment, but should be data",
2040                                                 segno);
2041                                 *ret_type = -SEG_TYPE_CUR_DATA;
2042                         } else {
2043                                 *ret_type = SEG_TYPE_CUR_DATA;
2044                         }
2045                         return curseg->sum_blk;
2046                 }
2047         }
2048
2049         sum_blk = calloc(BLOCK_SZ, 1);
2050         ASSERT(sum_blk);
2051
2052         ret = dev_read_block(sum_blk, ssa_blk);
2053         ASSERT(ret >= 0);
2054
2055         if (IS_SUM_NODE_SEG(sum_blk->footer))
2056                 *ret_type = SEG_TYPE_NODE;
2057         else if (IS_SUM_DATA_SEG(sum_blk->footer))
2058                 *ret_type = SEG_TYPE_DATA;
2059
2060         return sum_blk;
2061 }
2062
2063 int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
2064                                 struct f2fs_summary *sum_entry)
2065 {
2066         struct f2fs_summary_block *sum_blk;
2067         u32 segno, offset;
2068         int type;
2069
2070         segno = GET_SEGNO(sbi, blk_addr);
2071         offset = OFFSET_IN_SEG(sbi, blk_addr);
2072
2073         sum_blk = get_sum_block(sbi, segno, &type);
2074         memcpy(sum_entry, &(sum_blk->entries[offset]),
2075                                 sizeof(struct f2fs_summary));
2076         if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
2077                                         type == SEG_TYPE_MAX)
2078                 free(sum_blk);
2079         return type;
2080 }
2081
2082 static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
2083                                 struct f2fs_nat_entry *raw_nat)
2084 {
2085         struct f2fs_nat_block *nat_block;
2086         pgoff_t block_addr;
2087         int entry_off;
2088         int ret;
2089
2090         if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
2091                 return;
2092
2093         nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2094         ASSERT(nat_block);
2095
2096         entry_off = nid % NAT_ENTRY_PER_BLOCK;
2097         block_addr = current_nat_addr(sbi, nid, NULL);
2098
2099         ret = dev_read_block(nat_block, block_addr);
2100         ASSERT(ret >= 0);
2101
2102         memcpy(raw_nat, &nat_block->entries[entry_off],
2103                                         sizeof(struct f2fs_nat_entry));
2104         free(nat_block);
2105 }
2106
2107 void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
2108                                 u16 ofs_in_node, block_t newaddr)
2109 {
2110         struct f2fs_node *node_blk = NULL;
2111         struct node_info ni;
2112         block_t oldaddr, startaddr, endaddr;
2113         int ret;
2114
2115         node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
2116         ASSERT(node_blk);
2117
2118         get_node_info(sbi, nid, &ni);
2119
2120         /* read node_block */
2121         ret = dev_read_block(node_blk, ni.blk_addr);
2122         ASSERT(ret >= 0);
2123
2124         /* check its block address */
2125         if (node_blk->footer.nid == node_blk->footer.ino) {
2126                 int ofs = get_extra_isize(node_blk);
2127
2128                 oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs + ofs_in_node]);
2129                 node_blk->i.i_addr[ofs + ofs_in_node] = cpu_to_le32(newaddr);
2130                 ret = write_inode(node_blk, ni.blk_addr);
2131                 ASSERT(ret >= 0);
2132         } else {
2133                 oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
2134                 node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
2135                 ret = dev_write_block(node_blk, ni.blk_addr);
2136                 ASSERT(ret >= 0);
2137         }
2138
2139         /* check extent cache entry */
2140         if (node_blk->footer.nid != node_blk->footer.ino) {
2141                 get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
2142
2143                 /* read inode block */
2144                 ret = dev_read_block(node_blk, ni.blk_addr);
2145                 ASSERT(ret >= 0);
2146         }
2147
2148         startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
2149         endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
2150         if (oldaddr >= startaddr && oldaddr < endaddr) {
2151                 node_blk->i.i_ext.len = 0;
2152
2153                 /* update inode block */
2154                 ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
2155         }
2156         free(node_blk);
2157 }
2158
2159 void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
2160                                         nid_t nid, block_t newaddr)
2161 {
2162         struct f2fs_nat_block *nat_block;
2163         pgoff_t block_addr;
2164         int entry_off;
2165         int ret;
2166
2167         nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2168         ASSERT(nat_block);
2169
2170         entry_off = nid % NAT_ENTRY_PER_BLOCK;
2171         block_addr = current_nat_addr(sbi, nid, NULL);
2172
2173         ret = dev_read_block(nat_block, block_addr);
2174         ASSERT(ret >= 0);
2175
2176         if (ino)
2177                 nat_block->entries[entry_off].ino = cpu_to_le32(ino);
2178         nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
2179         if (c.func == FSCK)
2180                 F2FS_FSCK(sbi)->entries[nid] = nat_block->entries[entry_off];
2181
2182         ret = dev_write_block(nat_block, block_addr);
2183         ASSERT(ret >= 0);
2184         free(nat_block);
2185 }
2186
2187 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
2188 {
2189         struct f2fs_nat_entry raw_nat;
2190
2191         ni->nid = nid;
2192         if (c.func == FSCK && F2FS_FSCK(sbi)->nr_nat_entries) {
2193                 node_info_from_raw_nat(ni, &(F2FS_FSCK(sbi)->entries[nid]));
2194                 if (ni->blk_addr)
2195                         return;
2196                 /* nat entry is not cached, read it */
2197         }
2198
2199         get_nat_entry(sbi, nid, &raw_nat);
2200         node_info_from_raw_nat(ni, &raw_nat);
2201 }
2202
2203 static int build_sit_entries(struct f2fs_sb_info *sbi)
2204 {
2205         struct sit_info *sit_i = SIT_I(sbi);
2206         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2207         struct f2fs_journal *journal = &curseg->sum_blk->journal;
2208         struct f2fs_sit_block *sit_blk;
2209         struct seg_entry *se;
2210         struct f2fs_sit_entry sit;
2211         int sit_blk_cnt = SIT_BLK_CNT(sbi);
2212         unsigned int i, segno, end;
2213         unsigned int readed, start_blk = 0;
2214
2215         sit_blk = calloc(BLOCK_SZ, 1);
2216         if (!sit_blk) {
2217                 MSG(1, "\tError: Calloc failed for build_sit_entries!\n");
2218                 return -ENOMEM;
2219         }
2220
2221         do {
2222                 readed = f2fs_ra_meta_pages(sbi, start_blk, MAX_RA_BLOCKS,
2223                                                                 META_SIT);
2224
2225                 segno = start_blk * sit_i->sents_per_block;
2226                 end = (start_blk + readed) * sit_i->sents_per_block;
2227
2228                 for (; segno < end && segno < TOTAL_SEGS(sbi); segno++) {
2229                         se = &sit_i->sentries[segno];
2230
2231                         get_current_sit_page(sbi, segno, sit_blk);
2232                         sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2233
2234                         check_block_count(sbi, segno, &sit);
2235                         seg_info_from_raw_sit(sbi, se, &sit);
2236                 }
2237                 start_blk += readed;
2238         } while (start_blk < sit_blk_cnt);
2239
2241         free(sit_blk);
2242
2243         if (sits_in_cursum(journal) > SIT_JOURNAL_ENTRIES) {
2244                 MSG(0, "\tError: build_sit_entries truncates n_sits(%u) to "
2245                         "SIT_JOURNAL_ENTRIES(%lu)\n",
2246                         sits_in_cursum(journal), SIT_JOURNAL_ENTRIES);
2247                 journal->n_sits = cpu_to_le16(SIT_JOURNAL_ENTRIES);
2248                 c.fix_on = 1;
2249         }
2250
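	/*
	 * SIT journal entries in the cold-data summary are newer than the
	 * on-disk SIT blocks, so replay them over the seg_entries built
	 * above; an out-of-range segno truncates the journal and turns on
	 * fix mode.
	 */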
2251         for (i = 0; i < sits_in_cursum(journal); i++) {
2252                 segno = le32_to_cpu(segno_in_journal(journal, i));
2253
2254                 if (segno >= TOTAL_SEGS(sbi)) {
2255                         MSG(0, "\tError: build_sit_entries: segno(%u) is invalid!!!\n", segno);
2256                         journal->n_sits = cpu_to_le16(i);
2257                         c.fix_on = 1;
2258                         continue;
2259                 }
2260
2261                 se = &sit_i->sentries[segno];
2262                 sit = sit_in_journal(journal, i);
2263
2264                 check_block_count(sbi, segno, &sit);
2265                 seg_info_from_raw_sit(sbi, se, &sit);
2266         }
2267         return 0;
2268 }
2269
2270 static int build_segment_manager(struct f2fs_sb_info *sbi)
2271 {
2272         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2273         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2274         struct f2fs_sm_info *sm_info;
2275
2276         sm_info = malloc(sizeof(struct f2fs_sm_info));
2277         if (!sm_info) {
2278                 MSG(1, "\tError: Malloc failed for build_segment_manager!\n");
2279                 return -ENOMEM;
2280         }
2281
2282         /* init sm info */
2283         sbi->sm_info = sm_info;
2284         sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
2285         sm_info->main_blkaddr = get_sb(main_blkaddr);
2286         sm_info->segment_count = get_sb(segment_count);
2287         sm_info->reserved_segments = get_cp(rsvd_segment_count);
2288         sm_info->ovp_segments = get_cp(overprov_segment_count);
2289         sm_info->main_segments = get_sb(segment_count_main);
2290         sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
2291
2292         if (build_sit_info(sbi) || build_curseg(sbi) || build_sit_entries(sbi)) {
2293                 free(sm_info);
2294                 return -ENOMEM;
2295         }
2296
2297         return 0;
2298 }
2299
2300 void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
2301 {
2302         struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2303         struct f2fs_sm_info *sm_i = SM_I(sbi);
2304         unsigned int segno = 0;
2305         char *ptr = NULL;
2306         u32 sum_vblocks = 0;
2307         u32 free_segs = 0;
2308         struct seg_entry *se;
2309
2310         fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
2311         fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
2312         ASSERT(fsck->sit_area_bitmap);
2313         ptr = fsck->sit_area_bitmap;
2314
2315         ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
2316
2317         for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
2318                 se = get_seg_entry(sbi, segno);
2319
2320                 memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2321                 ptr += SIT_VBLOCK_MAP_SIZE;
2322
2323                 if (se->valid_blocks == 0x0) {
2324                         if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
2325                                 le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
2326                                 le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
2327                                 le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
2328                                 le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
2329                                 le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
2330                                 continue;
2331                         } else {
2332                                 free_segs++;
2333                         }
2334                 } else {
2335                         sum_vblocks += se->valid_blocks;
2336                 }
2337         }
2338         fsck->chk.sit_valid_blocks = sum_vblocks;
2339         fsck->chk.sit_free_segs = free_segs;
2340
2341         DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
2342                         sum_vblocks, sum_vblocks,
2343                         free_segs, free_segs);
2344 }
2345
2346 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
2347 {
2348         struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2349         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2350         struct sit_info *sit_i = SIT_I(sbi);
2351         struct f2fs_sit_block *sit_blk;
2352         unsigned int segno = 0;
2353         struct f2fs_summary_block *sum = curseg->sum_blk;
2354         char *ptr = NULL;
2355
2356         sit_blk = calloc(BLOCK_SZ, 1);
2357         ASSERT(sit_blk);
2358         /* remove sit journal */
2359         sum->journal.n_sits = 0;
2360
2361         ptr = fsck->main_area_bitmap;
2362
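	/*
	 * Write fsck's recalculated main_area_bitmap back into the SIT: for
	 * each segment, copy the validity map, recount its valid blocks and
	 * rewrite the SIT block on disk.
	 */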
2363         for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
2364                 struct f2fs_sit_entry *sit;
2365                 struct seg_entry *se;
2366                 u16 valid_blocks = 0;
2367                 u16 type;
2368                 int i;
2369
2370                 get_current_sit_page(sbi, segno, sit_blk);
2371                 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2372                 memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2373
2374                 /* update valid block count */
2375                 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
2376                         valid_blocks += get_bits_in_byte(sit->valid_map[i]);
2377
2378                 se = get_seg_entry(sbi, segno);
2379                 memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2380                 se->valid_blocks = valid_blocks;
2381                 type = se->type;
2382                 if (type >= NO_CHECK_TYPE) {
2383                         ASSERT_MSG("Invalid type: segno=0x%x, valid_blocks=0x%x",
2384                                         segno, valid_blocks);
2385                         type = 0;
2386                 }
2387                 sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
2388                                                                 valid_blocks);
2389                 rewrite_current_sit_page(sbi, segno, sit_blk);
2390
2391                 ptr += SIT_VBLOCK_MAP_SIZE;
2392         }
2393
2394         free(sit_blk);
2395 }
2396
2397 static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
2398 {
2399         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2400         struct f2fs_journal *journal = &curseg->sum_blk->journal;
2401         struct sit_info *sit_i = SIT_I(sbi);
2402         struct f2fs_sit_block *sit_blk;
2403         unsigned int segno;
2404         int i;
2405
2406         sit_blk = calloc(BLOCK_SZ, 1);
2407         ASSERT(sit_blk);
2408         for (i = 0; i < sits_in_cursum(journal); i++) {
2409                 struct f2fs_sit_entry *sit;
2410                 struct seg_entry *se;
2411
2412                 segno = segno_in_journal(journal, i);
2413                 se = get_seg_entry(sbi, segno);
2414
2415                 get_current_sit_page(sbi, segno, sit_blk);
2416                 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2417
2418                 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2419                 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2420                                                         se->valid_blocks);
2421                 sit->mtime = cpu_to_le64(se->mtime);
2422
2423                 rewrite_current_sit_page(sbi, segno, sit_blk);
2424         }
2425
2426         free(sit_blk);
2427         journal->n_sits = 0;
2428         return i;
2429 }
2430
2431 static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
2432 {
2433         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2434         struct f2fs_journal *journal = &curseg->sum_blk->journal;
2435         struct f2fs_nat_block *nat_block;
2436         pgoff_t block_addr;
2437         int entry_off;
2438         nid_t nid;
2439         int ret;
2440         int i = 0;
2441
2442         nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2443         ASSERT(nat_block);
2444 next:
2445         if (i >= nats_in_cursum(journal)) {
2446                 free(nat_block);
2447                 journal->n_nats = 0;
2448                 return i;
2449         }
2450
2451         nid = le32_to_cpu(nid_in_journal(journal, i));
2452
2453         entry_off = nid % NAT_ENTRY_PER_BLOCK;
2454         block_addr = current_nat_addr(sbi, nid, NULL);
2455
2456         ret = dev_read_block(nat_block, block_addr);
2457         ASSERT(ret >= 0);
2458
2459         memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
2460                                         sizeof(struct f2fs_nat_entry));
2461
2462         ret = dev_write_block(nat_block, block_addr);
2463         ASSERT(ret >= 0);
2464         i++;
2465         goto next;
2466 }
2467
2468 void flush_journal_entries(struct f2fs_sb_info *sbi)
2469 {
2470         int n_nats = flush_nat_journal_entries(sbi);
2471         int n_sits = flush_sit_journal_entries(sbi);
2472
2473         if (n_nats || n_sits)
2474                 write_checkpoints(sbi);
2475 }
2476
2477 void flush_sit_entries(struct f2fs_sb_info *sbi)
2478 {
2479         struct sit_info *sit_i = SIT_I(sbi);
2480         struct f2fs_sit_block *sit_blk;
2481         unsigned int segno = 0;
2482
2483         sit_blk = calloc(BLOCK_SZ, 1);
2484         ASSERT(sit_blk);
2485         /* update free segments */
2486         for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
2487                 struct f2fs_sit_entry *sit;
2488                 struct seg_entry *se;
2489
2490                 se = get_seg_entry(sbi, segno);
2491
2492                 if (!se->dirty)
2493                         continue;
2494
2495                 get_current_sit_page(sbi, segno, sit_blk);
2496                 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2497                 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2498                 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2499                                                         se->valid_blocks);
2500                 rewrite_current_sit_page(sbi, segno, sit_blk);
2501         }
2502
2503         free(sit_blk);
2504 }
2505
2506 int relocate_curseg_offset(struct f2fs_sb_info *sbi, int type)
2507 {
2508         struct curseg_info *curseg = CURSEG_I(sbi, type);
2509         struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
2510         unsigned int i;
2511
2512         if (c.zoned_model == F2FS_ZONED_HM)
2513                 return -EINVAL;
2514
2515         for (i = 0; i < sbi->blocks_per_seg; i++) {
2516                 if (!f2fs_test_bit(i, (const char *)se->cur_valid_map))
2517                         break;
2518         }
2519
2520         if (i == sbi->blocks_per_seg)
2521                 return -EINVAL;
2522
2523         DBG(1, "Update curseg[%d].next_blkoff %u -> %u, alloc_type %s -> SSR\n",
2524                         type, curseg->next_blkoff, i,
2525                         curseg->alloc_type == LFS ? "LFS" : "SSR");
2526
2527         curseg->next_blkoff = i;
2528         curseg->alloc_type = SSR;
2529
2530         return 0;
2531 }
2532
2533 void set_section_type(struct f2fs_sb_info *sbi, unsigned int segno, int type)
2534 {
2535         unsigned int i;
2536
2537         if (sbi->segs_per_sec == 1)
2538                 return;
2539
2540         for (i = 0; i < sbi->segs_per_sec; i++) {
2541                 struct seg_entry *se = get_seg_entry(sbi, segno + i);
2542
2543                 se->type = type;
2544         }
2545 }
2546
2547 #ifdef HAVE_LINUX_BLKZONED_H
2548
2549 static bool write_pointer_at_zone_start(struct f2fs_sb_info *sbi,
2550                                         unsigned int zone_segno)
2551 {
2552         u_int64_t sector;
2553         struct blk_zone blkz;
2554         block_t block = START_BLOCK(sbi, zone_segno);
2555         int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
2556         int ret, j;
2557
2558         if (c.zoned_model != F2FS_ZONED_HM)
2559                 return true;
2560
2561         for (j = 0; j < MAX_DEVICES; j++) {
2562                 if (!c.devices[j].path)
2563                         break;
2564                 if (c.devices[j].start_blkaddr <= block &&
2565                     block <= c.devices[j].end_blkaddr)
2566                         break;
2567         }
2568
2569         if (j >= MAX_DEVICES)
2570                 return false;
2571
2572         sector = (block - c.devices[j].start_blkaddr) << log_sectors_per_block;
2573         ret = f2fs_report_zone(j, sector, &blkz);
2574         if (ret)
2575                 return false;
2576
2577         if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
2578                 return true;
2579
2580         return blk_zone_sector(&blkz) == blk_zone_wp_sector(&blkz);
2581 }
2582
2583 #else
2584
2585 static bool write_pointer_at_zone_start(struct f2fs_sb_info *UNUSED(sbi),
2586                                         unsigned int UNUSED(zone_segno))
2587 {
2588         return true;
2589 }
2590
2591 #endif
2592
2593 int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left,
2594                                                 int want_type, bool new_sec)
2595 {
2596         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2597         struct seg_entry *se;
2598         u32 segno;
2599         u32 offset;
2600         int not_enough = 0;
2601         u64 end_blkaddr = (get_sb(segment_count_main) <<
2602                         get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
2603
2604         if (*to > 0)
2605                 *to -= left;
2606         if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
2607                 not_enough = 1;
2608
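	/*
	 * Walk the main area block by block in the requested direction,
	 * skipping full segments, current segments and (when free space is
	 * short) completely free segments.  A whole free section may be
	 * claimed and retyped for 'want_type'; otherwise any unused block in
	 * a segment of the wanted type is acceptable unless new_sec is set.
	 */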
2609         while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
2610                 unsigned short vblocks;
2611                 unsigned char *bitmap;
2612                 unsigned char type;
2613
2614                 segno = GET_SEGNO(sbi, *to);
2615                 offset = OFFSET_IN_SEG(sbi, *to);
2616
2617                 se = get_seg_entry(sbi, segno);
2618
2619                 vblocks = get_seg_vblocks(sbi, se);
2620                 bitmap = get_seg_bitmap(sbi, se);
2621                 type = get_seg_type(sbi, se);
2622
2623                 if (vblocks == sbi->blocks_per_seg ||
2624                                 IS_CUR_SEGNO(sbi, segno)) {
2625                         *to = left ? START_BLOCK(sbi, segno) - 1:
2626                                                 START_BLOCK(sbi, segno + 1);
2627                         continue;
2628                 }
2629
2630                 if (vblocks == 0 && not_enough) {
2631                         *to = left ? START_BLOCK(sbi, segno) - 1:
2632                                                 START_BLOCK(sbi, segno + 1);
2633                         continue;
2634                 }
2635
2636                 if (vblocks == 0 && !(segno % sbi->segs_per_sec)) {
2637                         struct seg_entry *se2;
2638                         unsigned int i;
2639
2640                         for (i = 1; i < sbi->segs_per_sec; i++) {
2641                                 se2 = get_seg_entry(sbi, segno + i);
2642                                 if (get_seg_vblocks(sbi, se2))
2643                                         break;
2644                         }
2645
2646                         if (i == sbi->segs_per_sec &&
2647                             write_pointer_at_zone_start(sbi, segno)) {
2648                                 set_section_type(sbi, segno, want_type);
2649                                 return 0;
2650                         }
2651                 }
2652
2653                 if (type == want_type && !new_sec &&
2654                         !f2fs_test_bit(offset, (const char *)bitmap))
2655                         return 0;
2656
2657                 *to = left ? *to - 1: *to + 1;
2658         }
2659         return -1;
2660 }
2661
2662 static void move_one_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left,
2663                                  int i)
2664 {
2665         struct curseg_info *curseg = CURSEG_I(sbi, i);
2666         struct f2fs_summary_block buf;
2667         u32 old_segno;
2668         u64 ssa_blk, to;
2669         int ret;
2670
2671         /* update original SSA too */
2672         ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2673         ret = dev_write_block(curseg->sum_blk, ssa_blk);
2674         ASSERT(ret >= 0);
2675
2676         to = from;
2677         ret = find_next_free_block(sbi, &to, left, i,
2678                                    c.zoned_model == F2FS_ZONED_HM);
2679         ASSERT(ret == 0);
2680
2681         old_segno = curseg->segno;
2682         curseg->segno = GET_SEGNO(sbi, to);
2683         curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
2684         curseg->alloc_type = c.zoned_model == F2FS_ZONED_HM ? LFS : SSR;
2685
2686         /* update new segno */
2687         ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2688         ret = dev_read_block(&buf, ssa_blk);
2689         ASSERT(ret >= 0);
2690
2691         memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
2692
2693         /* update se->types */
2694         reset_curseg(sbi, i);
2695
2696         FIX_MSG("Move curseg[%d] %x -> %x after %"PRIx64"\n",
2697                 i, old_segno, curseg->segno, from);
2698 }
2699
2700 void move_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left)
2701 {
2702         int i;
2703
2704         /* relocate all current segments, searching for new slots from 'from' */
2705         for (i = 0; i < NO_CHECK_TYPE; i++)
2706                 move_one_curseg_info(sbi, from, left, i);
2707 }
2708
2709 void update_curseg_info(struct f2fs_sb_info *sbi, int type)
2710 {
2711         if (!relocate_curseg_offset(sbi, type))
2712                 return;
2713         move_one_curseg_info(sbi, SM_I(sbi)->main_blkaddr, 0, type);
2714 }
2715
2716 void zero_journal_entries(struct f2fs_sb_info *sbi)
2717 {
2718         int i;
2719
2720         for (i = 0; i < NO_CHECK_TYPE; i++)
2721                 CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
2722 }
2723
2724 void write_curseg_info(struct f2fs_sb_info *sbi)
2725 {
2726         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2727         int i;
2728
2729         for (i = 0; i < NO_CHECK_TYPE; i++) {
2730                 cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
2731                 if (i < CURSEG_HOT_NODE) {
2732                         set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
2733                         set_cp(cur_data_blkoff[i],
2734                                         CURSEG_I(sbi, i)->next_blkoff);
2735                 } else {
2736                         int n = i - CURSEG_HOT_NODE;
2737
2738                         set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
2739                         set_cp(cur_node_blkoff[n],
2740                                         CURSEG_I(sbi, i)->next_blkoff);
2741                 }
2742         }
2743 }
2744
2745 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
2746                                         struct f2fs_nat_entry *raw_nat)
2747 {
2748         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2749         struct f2fs_journal *journal = &curseg->sum_blk->journal;
2750         int i = 0;
2751
2752         for (i = 0; i < nats_in_cursum(journal); i++) {
2753                 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
2754                         memcpy(raw_nat, &nat_in_journal(journal, i),
2755                                                 sizeof(struct f2fs_nat_entry));
2756                         DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
2757                         return i;
2758                 }
2759         }
2760         return -1;
2761 }
2762
2763 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
2764 {
2765         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2766         struct f2fs_journal *journal = &curseg->sum_blk->journal;
2767         struct f2fs_nat_block *nat_block;
2768         pgoff_t block_addr;
2769         int entry_off;
2770         int ret;
2771         int i = 0;
2772
2773         /* check in journal */
2774         for (i = 0; i < nats_in_cursum(journal); i++) {
2775                 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
2776                         memset(&nat_in_journal(journal, i), 0,
2777                                         sizeof(struct f2fs_nat_entry));
2778                         FIX_MSG("Remove nid [0x%x] in nat journal", nid);
2779                         return;
2780                 }
2781         }
2782         nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2783         ASSERT(nat_block);
2784
2785         entry_off = nid % NAT_ENTRY_PER_BLOCK;
2786         block_addr = current_nat_addr(sbi, nid, NULL);
2787
2788         ret = dev_read_block(nat_block, block_addr);
2789         ASSERT(ret >= 0);
2790
2791         if (nid == F2FS_NODE_INO(sbi) || nid == F2FS_META_INO(sbi)) {
2792                 FIX_MSG("nid [0x%x] block_addr= 0x%x -> 0x1", nid,
2793                         le32_to_cpu(nat_block->entries[entry_off].block_addr));
2794                 nat_block->entries[entry_off].block_addr = cpu_to_le32(0x1);
2795         } else {
2796                 memset(&nat_block->entries[entry_off], 0,
2797                                         sizeof(struct f2fs_nat_entry));
2798                 FIX_MSG("Remove nid [0x%x] in NAT", nid);
2799         }
2800
2801         ret = dev_write_block(nat_block, block_addr);
2802         ASSERT(ret >= 0);
2803         free(nat_block);
2804 }
2805
2806 void duplicate_checkpoint(struct f2fs_sb_info *sbi)
2807 {
2808         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2809         unsigned long long dst, src;
2810         void *buf;
2811         unsigned int seg_size = 1 << get_sb(log_blocks_per_seg);
2812         int ret;
2813
2814         if (sbi->cp_backuped)
2815                 return;
2816
2817         buf = malloc(F2FS_BLKSIZE * seg_size);
2818         ASSERT(buf);
2819
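	/*
	 * The two checkpoint packs occupy consecutive segments starting at
	 * cp_blkaddr; copy the currently valid pack over its mirror so both
	 * hold the same state before further updates.
	 */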
2820         if (sbi->cur_cp == 1) {
2821                 src = get_sb(cp_blkaddr);
2822                 dst = src + seg_size;
2823         } else {
2824                 dst = get_sb(cp_blkaddr);
2825                 src = dst + seg_size;
2826         }
2827
2828         ret = dev_read(buf, src << F2FS_BLKSIZE_BITS,
2829                                 seg_size << F2FS_BLKSIZE_BITS);
2830         ASSERT(ret >= 0);
2831
2832         ret = dev_write(buf, dst << F2FS_BLKSIZE_BITS,
2833                                 seg_size << F2FS_BLKSIZE_BITS);
2834         ASSERT(ret >= 0);
2835
2836         free(buf);
2837
2838         ret = f2fs_fsync_device();
2839         ASSERT(ret >= 0);
2840
2841         sbi->cp_backuped = 1;
2842
2843         MSG(0, "Info: Duplicated valid checkpoint to mirror position "
2844                 "%llu -> %llu\n", src, dst);
2845 }
2846
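/*
 * Rewrite the checkpoint pack selected by sbi->cur_cp: refresh the free,
 * valid block/node/inode counters (taken from fsck results when running as
 * fsck), recompute the checkpoint checksum, then write the leading cp block,
 * the current segment summaries (mirroring them into the original SSA),
 * optional NAT bits, and finally the trailing cp block, with device flushes
 * so the pack is committed safely.
 */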
2847 void write_checkpoint(struct f2fs_sb_info *sbi)
2848 {
2849         struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2850         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2851         block_t orphan_blks = 0;
2852         unsigned long long cp_blk_no;
2853         u32 flags = CP_UMOUNT_FLAG;
2854         int i, ret;
2855         u_int32_t crc = 0;
2856
2857         if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
2858                 orphan_blks = __start_sum_addr(sbi) - 1;
2859                 flags |= CP_ORPHAN_PRESENT_FLAG;
2860         }
2861         if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
2862                 flags |= CP_TRIMMED_FLAG;
2863         if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
2864                 flags |= CP_DISABLED_FLAG;
2865         if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
2866                 flags |= CP_LARGE_NAT_BITMAP_FLAG;
2867                 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
2868         } else {
2869                 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
2870         }
2871
2872         set_cp(free_segment_count, get_free_segments(sbi));
2873         if (c.func == FSCK) {
2874                 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2875
2876                 set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
2877                 set_cp(valid_node_count, fsck->chk.valid_node_cnt);
2878                 set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
2879         } else {
2880                 set_cp(valid_block_count, sbi->total_valid_block_count);
2881                 set_cp(valid_node_count, sbi->total_valid_node_count);
2882                 set_cp(valid_inode_count, sbi->total_valid_inode_count);
2883         }
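        /*
         * cp pack layout: leading/trailing cp blocks (2) plus the six current
         * segment summary blocks, plus cp_payload and any orphan blocks.
         */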
2884         set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
2885
2886         flags = update_nat_bits_flags(sb, cp, flags);
2887         set_cp(ckpt_flags, flags);
2888
2889         crc = f2fs_checkpoint_chksum(cp);
2890         *((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
2891                                                         cpu_to_le32(crc);
2892
2893         cp_blk_no = get_sb(cp_blkaddr);
2894         if (sbi->cur_cp == 2)
2895                 cp_blk_no += 1 << get_sb(log_blocks_per_seg);
2896
2897         /* write the first cp */
2898         ret = dev_write_block(cp, cp_blk_no++);
2899         ASSERT(ret >= 0);
2900
2901         /* skip payload */
2902         cp_blk_no += get_sb(cp_payload);
2903         /* skip orphan blocks */
2904         cp_blk_no += orphan_blks;
2905
2906         /* update summary blocks having nullified journal entries */
2907         for (i = 0; i < NO_CHECK_TYPE; i++) {
2908                 struct curseg_info *curseg = CURSEG_I(sbi, i);
2909                 u64 ssa_blk;
2910
2911                 ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
2912                 ASSERT(ret >= 0);
2913
2914                 /* update original SSA too */
2915                 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2916                 ret = dev_write_block(curseg->sum_blk, ssa_blk);
2917                 ASSERT(ret >= 0);
2918         }
2919
2920         /* Write nat bits */
2921         if (flags & CP_NAT_BITS_FLAG)
2922                 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
2923
2924         /* in case of sudden power off */
2925         ret = f2fs_fsync_device();
2926         ASSERT(ret >= 0);
2927
2928         /* write the last cp */
2929         ret = dev_write_block(cp, cp_blk_no++);
2930         ASSERT(ret >= 0);
2931
2932         ret = f2fs_fsync_device();
2933         ASSERT(ret >= 0);
2934 }
2935
2936 void write_checkpoints(struct f2fs_sb_info *sbi)
2937 {
2938         /* copy valid checkpoint to its mirror position */
2939         duplicate_checkpoint(sbi);
2940
2941         /* repair checkpoint at CP #0 position */
2942         sbi->cur_cp = 1;
2943         write_checkpoint(sbi);
2944 }
2945
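/*
 * Scan every on-disk NAT block (honouring the NAT version bitmap) plus the
 * NAT journal, record each in-use nid in fsck->nat_area_bitmap and cache the
 * raw entries in fsck->entries, counting valid NAT entries and inodes along
 * the way.  fsck_verify later uses this bitmap to spot NAT entries that no
 * reachable node references.
 */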
2946 void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
2947 {
2948         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2949         struct f2fs_journal *journal = &curseg->sum_blk->journal;
2950         struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2951         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2952         struct f2fs_nm_info *nm_i = NM_I(sbi);
2953         struct f2fs_nat_block *nat_block;
2954         struct node_info ni;
2955         u32 nid, nr_nat_blks;
2956         pgoff_t block_off;
2957         pgoff_t block_addr;
2958         int seg_off;
2959         int ret;
2960         unsigned int i;
2961
2962         nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2963         ASSERT(nat_block);
2964
2965         /* Alloc & build nat entry bitmap */
2966         nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
2967                                         sbi->log_blocks_per_seg;
2968
2969         fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
2970         fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
2971         fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
2972         ASSERT(fsck->nat_area_bitmap);
2973
2974         fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
2975                                         fsck->nr_nat_entries);
2976         ASSERT(fsck->entries);
2977
2978         for (block_off = 0; block_off < nr_nat_blks; block_off++) {
2979
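                /*
                 * Each NAT logical block has two on-disk copies in adjacent
                 * segments; nat_bitmap selects which copy is currently valid.
                 */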
2980                 seg_off = block_off >> sbi->log_blocks_per_seg;
2981                 block_addr = (pgoff_t)(nm_i->nat_blkaddr +
2982                         (seg_off << sbi->log_blocks_per_seg << 1) +
2983                         (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
2984
2985                 if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
2986                         block_addr += sbi->blocks_per_seg;
2987
2988                 ret = dev_read_block(nat_block, block_addr);
2989                 ASSERT(ret >= 0);
2990
2991                 nid = block_off * NAT_ENTRY_PER_BLOCK;
2992                 for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
2993                         ni.nid = nid + i;
2994
2995                         if ((nid + i) == F2FS_NODE_INO(sbi) ||
2996                                         (nid + i) == F2FS_META_INO(sbi)) {
2997                                 /*
2998                                  * block_addr of node/meta inode should be 0x1.
2999                                  * Set this bit, and fsck_verify will fix it.
3000                                  */
3001                                 if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
3002                                         ASSERT_MSG("\tError: ino[0x%x] block_addr[0x%x] is invalid\n",
3003                                                         nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
3004                                         f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3005                                 }
3006                                 continue;
3007                         }
3008
3009                         node_info_from_raw_nat(&ni, &nat_block->entries[i]);
3010                         if (ni.blk_addr == 0x0)
3011                                 continue;
3012                         if (ni.ino == 0x0) {
3013                                 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3014                                         " is invalid\n", ni.ino, ni.blk_addr);
3015                         }
3016                         if (ni.ino == (nid + i)) {
3017                                 fsck->nat_valid_inode_cnt++;
3018                         DBG(3, "ino[0x%8x] may be an inode\n", ni.ino);
3019                         }
3020                         if (nid + i == 0) {
3021                                 /*
3022                                  * nat entry [0] must be null.  If
3023                                  * it is corrupted, set its bit in
3024                                  * nat_area_bitmap, fsck_verify will
3025                                  * nullify it
3026                                  */
3027                                 ASSERT_MSG("Invalid nat entry[0]: "
3028                                         "blk_addr[0x%x]\n", ni.blk_addr);
3029                                 fsck->chk.valid_nat_entry_cnt--;
3030                         }
3031
3032                         DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
3033                                 nid + i, ni.blk_addr, ni.ino);
3034                         f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3035                         fsck->chk.valid_nat_entry_cnt++;
3036
3037                         fsck->entries[nid + i] = nat_block->entries[i];
3038                 }
3039         }
3040
3041         /* Traverse nat journal, update the corresponding entries */
3042         for (i = 0; i < nats_in_cursum(journal); i++) {
3043                 struct f2fs_nat_entry raw_nat;
3044                 nid = le32_to_cpu(nid_in_journal(journal, i));
3045                 ni.nid = nid;
3046
3047                 DBG(3, "==> Found nid [0x%x] in nat journal, update it\n", nid);
3048
3049                 /* Clear the original bit and count */
3050                 if (fsck->entries[nid].block_addr != 0x0) {
3051                         fsck->chk.valid_nat_entry_cnt--;
3052                         f2fs_clear_bit(nid, fsck->nat_area_bitmap);
3053                         if (fsck->entries[nid].ino == nid)
3054                                 fsck->nat_valid_inode_cnt--;
3055                 }
3056
3057                 /* Use nat entries in journal */
3058                 memcpy(&raw_nat, &nat_in_journal(journal, i),
3059                                         sizeof(struct f2fs_nat_entry));
3060                 node_info_from_raw_nat(&ni, &raw_nat);
3061                 if (ni.blk_addr != 0x0) {
3062                         if (ni.ino == 0x0)
3063                                 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3064                                         " is invalid\n", ni.ino, ni.blk_addr);
3065                         if (ni.ino == nid) {
3066                                 fsck->nat_valid_inode_cnt++;
3067                                 DBG(3, "ino[0x%8x] may be an inode\n", ni.ino);
3068                         }
3069                         f2fs_set_bit(nid, fsck->nat_area_bitmap);
3070                         fsck->chk.valid_nat_entry_cnt++;
3071                         DBG(3, "nid[0x%x] in nat journal\n", nid);
3072                 }
3073                 fsck->entries[nid] = raw_nat;
3074         }
3075         free(nat_block);
3076
3077         DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
3078                         fsck->chk.valid_nat_entry_cnt,
3079                         fsck->chk.valid_nat_entry_cnt);
3080 }
3081
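/*
 * If the superblock's recorded sector size or sectors-per-block differ from
 * what was probed from the device, rewrite both values and update all
 * superblock copies.
 */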
3082 static int check_sector_size(struct f2fs_super_block *sb)
3083 {
3084         u_int32_t log_sectorsize, log_sectors_per_block;
3085
3086         log_sectorsize = log_base_2(c.sector_size);
3087         log_sectors_per_block = log_base_2(c.sectors_per_blk);
3088
3089         if (log_sectorsize == get_sb(log_sectorsize) &&
3090                         log_sectors_per_block == get_sb(log_sectors_per_block))
3091                 return 0;
3092
3093         set_sb(log_sectorsize, log_sectorsize);
3094         set_sb(log_sectors_per_block, log_sectors_per_block);
3095
3096         update_superblock(sb, SB_MASK_ALL);
3097         return 0;
3098 }
3099
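/*
 * Turn on superblock features requested through c.feature (encryption,
 * casefolding) that the image lacks, then rewrite the superblock if
 * anything changed.
 */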
3100 static int tune_sb_features(struct f2fs_sb_info *sbi)
3101 {
3102         int sb_changed = 0;
3103         struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3104
3105         if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) &&
3106                         c.feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
3107                 sb->feature |= cpu_to_le32(F2FS_FEATURE_ENCRYPT);
3108                 MSG(0, "Info: Set Encryption feature\n");
3109                 sb_changed = 1;
3110         }
3111         if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) &&
3112                 c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
3113                 if (!c.s_encoding) {
3114                         ERR_MSG("ERROR: Must specify encoding to enable casefolding.\n");
3115                         return -1;
3116                 }
3117                 sb->feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
3118                 MSG(0, "Info: Set Casefold feature\n");
3119                 sb_changed = 1;
3120         }
3121         /* TODO: quota needs to allocate inode numbers */
3122
3123         c.feature = sb->feature;
3124         if (!sb_changed)
3125                 return 0;
3126
3127         update_superblock(sb, SB_MASK_ALL);
3128         return 0;
3129 }
3130
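/*
 * The helpers below maintain the list of inodes that have fsync'ed dnodes
 * found in the warm node chain during roll-forward recovery scanning.
 */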
3131 static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
3132                                                                 nid_t ino)
3133 {
3134         struct fsync_inode_entry *entry;
3135
3136         list_for_each_entry(entry, head, list)
3137                 if (entry->ino == ino)
3138                         return entry;
3139
3140         return NULL;
3141 }
3142
3143 static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
3144                                                                 nid_t ino)
3145 {
3146         struct fsync_inode_entry *entry;
3147
3148         entry = calloc(sizeof(struct fsync_inode_entry), 1);
3149         if (!entry)
3150                 return NULL;
3151         entry->ino = ino;
3152         list_add_tail(&entry->list, head);
3153         return entry;
3154 }
3155
3156 static void del_fsync_inode(struct fsync_inode_entry *entry)
3157 {
3158         list_del(&entry->list);
3159         free(entry);
3160 }
3161
3162 static void destroy_fsync_dnodes(struct list_head *head)
3163 {
3164         struct fsync_inode_entry *entry, *tmp;
3165
3166         list_for_each_entry_safe(entry, tmp, head, list)
3167                 del_fsync_inode(entry);
3168 }
3169
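/*
 * Walk the node chain that starts right after the warm node current segment
 * and collect every inode that has an fsync-marked dnode into @head.  The
 * walk stops at the first unrecoverable block and bails out if the chain
 * loops back on itself.
 */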
3170 static int find_fsync_inode(struct f2fs_sb_info *sbi, struct list_head *head)
3171 {
3172         struct curseg_info *curseg;
3173         struct f2fs_node *node_blk;
3174         block_t blkaddr;
3175         unsigned int loop_cnt = 0;
3176         unsigned int free_blocks = TOTAL_SEGS(sbi) * sbi->blocks_per_seg -
3177                                                 sbi->total_valid_block_count;
3178         int err = 0;
3179
3180         /* get node pages in the current segment */
3181         curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3182         blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3183
3184         node_blk = calloc(F2FS_BLKSIZE, 1);
3185         ASSERT(node_blk);
3186
3187         while (1) {
3188                 struct fsync_inode_entry *entry;
3189
3190                 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3191                         break;
3192
3193                 err = dev_read_block(node_blk, blkaddr);
3194                 if (err)
3195                         break;
3196
3197                 if (!is_recoverable_dnode(sbi, node_blk))
3198                         break;
3199
3200                 if (!is_fsync_dnode(node_blk))
3201                         goto next;
3202
3203                 entry = get_fsync_inode(head, ino_of_node(node_blk));
3204                 if (!entry) {
3205                         entry = add_fsync_inode(head, ino_of_node(node_blk));
3206                         if (!entry) {
3207                                 err = -1;
3208                                 break;
3209                         }
3210                 }
3211                 entry->blkaddr = blkaddr;
3212
3213                 if (IS_INODE(node_blk) && is_dent_dnode(node_blk))
3214                         entry->last_dentry = blkaddr;
3215 next:
3216                 /* sanity check in order to detect looped node chain */
3217                 if (++loop_cnt >= free_blocks ||
3218                         blkaddr == next_blkaddr_of_node(node_blk)) {
3219                         MSG(0, "\tdetected looped node chain, blkaddr:%u, next:%u\n",
3220                                     blkaddr,
3221                                     next_blkaddr_of_node(node_blk));
3222                         err = -1;
3223                         break;
3224                 }
3225
3226                 blkaddr = next_blkaddr_of_node(node_blk);
3227         }
3228
3229         free(node_blk);
3230         return err;
3231 }
3232
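/*
 * Mark the fsync'ed node block at @blkaddr, and every valid data block it
 * references, as checkpoint-valid in the corresponding segment entries so
 * the roll-forward data is not treated as free space.  Inline data and
 * xattr nodes carry no data block addresses and are skipped.
 */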
3233 static int do_record_fsync_data(struct f2fs_sb_info *sbi,
3234                                         struct f2fs_node *node_blk,
3235                                         block_t blkaddr)
3236 {
3237         unsigned int segno, offset;
3238         struct seg_entry *se;
3239         unsigned int ofs_in_node = 0;
3240         unsigned int start, end;
3241         int err = 0, recorded = 0;
3242
3243         segno = GET_SEGNO(sbi, blkaddr);
3244         se = get_seg_entry(sbi, segno);
3245         offset = OFFSET_IN_SEG(sbi, blkaddr);
3246
3247         if (f2fs_test_bit(offset, (char *)se->cur_valid_map)) {
3248                 ASSERT(0);
3249                 return -1;
3250         }
3251         if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map)) {
3252                 ASSERT(0);
3253                 return -1;
3254         }
3255
3256         if (!se->ckpt_valid_blocks)
3257                 se->ckpt_type = CURSEG_WARM_NODE;
3258
3259         se->ckpt_valid_blocks++;
3260         f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3261
3262         MSG(1, "do_record_fsync_data: [node] ino = %u, ofs = %u, blkaddr = %u\n",
3263             ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3264
3265         /* inline data */
3266         if (IS_INODE(node_blk) && (node_blk->i.i_inline & F2FS_INLINE_DATA))
3267                 return 0;
3268         /* xattr node */
3269         if (ofs_of_node(node_blk) == XATTR_NODE_OFFSET)
3270                 return 0;
3271
3272         /* step 3: recover data indices */
3273         start = start_bidx_of_node(ofs_of_node(node_blk), node_blk);
3274         end = start + ADDRS_PER_PAGE(sbi, node_blk, NULL);
3275
3276         for (; start < end; start++, ofs_in_node++) {
3277                 blkaddr = datablock_addr(node_blk, ofs_in_node);
3278
3279                 if (!is_valid_data_blkaddr(blkaddr))
3280                         continue;
3281
3282                 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) {
3283                         err = -1;
3284                         goto out;
3285                 }
3286
3287                 segno = GET_SEGNO(sbi, blkaddr);
3288                 se = get_seg_entry(sbi, segno);
3289                 offset = OFFSET_IN_SEG(sbi, blkaddr);
3290
3291                 if (f2fs_test_bit(offset, (char *)se->cur_valid_map))
3292                         continue;
3293                 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map))
3294                         continue;
3295
3296                 if (!se->ckpt_valid_blocks)
3297                         se->ckpt_type = CURSEG_WARM_DATA;
3298
3299                 se->ckpt_valid_blocks++;
3300                 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3301
3302                 MSG(1, "do_record_fsync_data: [data] ino = %u, ofs = %u, blkaddr = %u\n",
3303                     ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3304
3305                 recorded++;
3306         }
3307 out:
3308         MSG(1, "recover_data: ino = %u, ofs = %u, recorded = %d, err = %d\n",
3309                     ino_of_node(node_blk), ofs_of_node(node_blk),
3310                     recorded, err);
3311         return err;
3312 }
3313
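/*
 * Second pass over the warm node chain: for every dnode whose inode was
 * collected by find_fsync_inode(), record its blocks via
 * do_record_fsync_data(), dropping the inode from the list once its last
 * logged dnode has been processed.
 */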
3314 static int traverse_dnodes(struct f2fs_sb_info *sbi,
3315                                 struct list_head *inode_list)
3316 {
3317         struct curseg_info *curseg;
3318         struct f2fs_node *node_blk;
3319         block_t blkaddr;
3320         int err = 0;
3321
3322         /* get node pages in the current segment */
3323         curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3324         blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3325
3326         node_blk = calloc(F2FS_BLKSIZE, 1);
3327         ASSERT(node_blk);
3328
3329         while (1) {
3330                 struct fsync_inode_entry *entry;
3331
3332                 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3333                         break;
3334
3335                 err = dev_read_block(node_blk, blkaddr);
3336                 if (err)
3337                         break;
3338
3339                 if (!is_recoverable_dnode(sbi, node_blk))
3340                         break;
3341
3342                 entry = get_fsync_inode(inode_list,
3343                                         ino_of_node(node_blk));
3344                 if (!entry)
3345                         goto next;
3346
3347                 err = do_record_fsync_data(sbi, node_blk, blkaddr);
3348                 if (err)
3349                         break;
3350
3351                 if (entry->blkaddr == blkaddr)
3352                         del_fsync_inode(entry);
3353 next:
3354                 blkaddr = next_blkaddr_of_node(node_blk);
3355         }
3356
3357         free(node_blk);
3358         return err;
3359 }
3360
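/*
 * Roll-forward bookkeeping: when the image needs it, scan the fsync'ed node
 * chain and mark its node and data blocks as checkpoint-valid so later fsck
 * passes account for them.
 */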
3361 static int record_fsync_data(struct f2fs_sb_info *sbi)
3362 {
3363         struct list_head inode_list = LIST_HEAD_INIT(inode_list);
3364         int ret;
3365
3366         if (!need_fsync_data_record(sbi))
3367                 return 0;
3368
3369         ret = find_fsync_inode(sbi, &inode_list);
3370         if (ret)
3371                 goto out;
3372
3373         ret = traverse_dnodes(sbi, &inode_list);
3374 out:
3375         destroy_fsync_dnodes(&inode_list);
3376         return ret;
3377 }
3378
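/*
 * Bring the image up for fsck/dump/sload: validate a superblock (falling
 * back to the second copy), check the sector geometry, load a valid
 * checkpoint, sanity check it, tune requested features, seed the metadata
 * checksum, and build the segment and node managers plus the roll-forward
 * fsync records.  Returns 0 on success, 1 if fsck should not proceed, and
 * -1 on error.
 */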
3379 int f2fs_do_mount(struct f2fs_sb_info *sbi)
3380 {
3381         struct f2fs_checkpoint *cp = NULL;
3382         struct f2fs_super_block *sb = NULL;
3383         int ret;
3384
3385         sbi->active_logs = NR_CURSEG_TYPE;
3386         ret = validate_super_block(sbi, SB0_ADDR);
3387         if (ret) {
3388                 ret = validate_super_block(sbi, SB1_ADDR);
3389                 if (ret)
3390                         return -1;
3391         }
3392         sb = F2FS_RAW_SUPER(sbi);
3393
3394         ret = check_sector_size(sb);
3395         if (ret)
3396                 return -1;
3397
3398         print_raw_sb_info(sb);
3399
3400         init_sb_info(sbi);
3401
3402         ret = get_valid_checkpoint(sbi);
3403         if (ret) {
3404                 ERR_MSG("Can't find valid checkpoint\n");
3405                 return -1;
3406         }
3407
3408         c.bug_on = 0;
3409
3410         if (sanity_check_ckpt(sbi)) {
3411                 ERR_MSG("Checkpoint is polluted\n");
3412                 return -1;
3413         }
3414         cp = F2FS_CKPT(sbi);
3415
3416         if (c.func != FSCK && c.func != DUMP &&
3417                 !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
3418                 ERR_MSG("Mount unclean image to replay log first\n");
3419                 return -1;
3420         }
3421
3422         print_ckpt_info(sbi);
3423
3424         if (c.quota_fix) {
3425                 if (get_cp(ckpt_flags) & CP_QUOTA_NEED_FSCK_FLAG)
3426                         c.fix_on = 1;
3427         }
3428
3429         if (tune_sb_features(sbi))
3430                 return -1;
3431
3432         /* precompute checksum seed for metadata */
3433         if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
3434                 c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
3435
3436         sbi->total_valid_node_count = get_cp(valid_node_count);
3437         sbi->total_valid_inode_count = get_cp(valid_inode_count);
3438         sbi->user_block_count = get_cp(user_block_count);
3439         sbi->total_valid_block_count = get_cp(valid_block_count);
3440         sbi->last_valid_block_count = sbi->total_valid_block_count;
3441         sbi->alloc_valid_block_count = 0;
3442
3443         if (build_segment_manager(sbi)) {
3444                 ERR_MSG("build_segment_manager failed\n");
3445                 return -1;
3446         }
3447
3448         if (build_node_manager(sbi)) {
3449                 ERR_MSG("build_node_manager failed\n");
3450                 return -1;
3451         }
3452
3453         if (record_fsync_data(sbi)) {
3454                 ERR_MSG("record_fsync_data failed\n");
3455                 return -1;
3456         }
3457
3458         if (!f2fs_should_proceed(sb, get_cp(ckpt_flags)))
3459                 return 1;
3460
3461         /* Check nat_bits */
3462         if (c.func == FSCK && is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
3463                 if (check_nat_bits(sbi, sb, cp) && c.fix_on)
3464                         write_nat_bits(sbi, sb, cp, sbi->cur_cp);
3465         }
3466         return 0;
3467 }
3468
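/*
 * Release everything f2fs_do_mount() allocated: node manager bitmaps, SIT
 * entries and bitmaps, the current segment summaries, and the cached
 * checkpoint and superblock.
 */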
3469 void f2fs_do_umount(struct f2fs_sb_info *sbi)
3470 {
3471         struct sit_info *sit_i = SIT_I(sbi);
3472         struct f2fs_sm_info *sm_i = SM_I(sbi);
3473         struct f2fs_nm_info *nm_i = NM_I(sbi);
3474         unsigned int i;
3475
3476         /* free nm_info */
3477         if (c.func == SLOAD || c.func == FSCK)
3478                 free(nm_i->nid_bitmap);
3479         free(nm_i->nat_bitmap);
3480         free(sbi->nm_info);
3481
3482         /* free sit_info */
3483         free(sit_i->bitmap);
3484         free(sit_i->sit_bitmap);
3485         free(sit_i->sentries);
3486         free(sm_i->sit_info);
3487
3488         /* free sm_info */
3489         for (i = 0; i < NR_CURSEG_TYPE; i++)
3490                 free(sm_i->curseg_array[i].sum_blk);
3491
3492         free(sm_i->curseg_array);
3493         free(sbi->sm_info);
3494
3495         free(sbi->ckpt);
3496         free(sbi->raw_super);
3497 }
3498
3499 #ifdef WITH_ANDROID
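/*
 * In sparse mode, pre-fill the SIT area, the NAT area and both checkpoint
 * payload (nat/sit bitmap) regions with zero-filled chunks so the sparse
 * image carries explicit blocks for them.
 */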
3500 int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi)
3501 {
3502         struct f2fs_super_block *sb = sbi->raw_super;
3503         u_int32_t sit_seg_count, sit_size;
3504         u_int32_t nat_seg_count, nat_size;
3505         u_int64_t sit_seg_addr, nat_seg_addr, payload_addr;
3506         u_int32_t seg_size = 1 << get_sb(log_blocks_per_seg);
3507         int ret;
3508
3509         if (!c.sparse_mode)
3510                 return 0;
3511
3512         sit_seg_addr = get_sb(sit_blkaddr);
3513         sit_seg_count = get_sb(segment_count_sit);
3514         sit_size = sit_seg_count * seg_size;
3515
3516         DBG(1, "\tSparse: filling sit area at block offset 0x%08"PRIx64" len: %u\n",
3517                                                         sit_seg_addr, sit_size);
3518         ret = dev_fill(NULL, sit_seg_addr * F2FS_BLKSIZE,
3519                                         sit_size * F2FS_BLKSIZE);
3520         if (ret) {
3521                 MSG(1, "\tError: While zeroing out the sit area "
3522                                 "on disk!!!\n");
3523                 return -1;
3524         }
3525
3526         nat_seg_addr = get_sb(nat_blkaddr);
3527         nat_seg_count = get_sb(segment_count_nat);
3528         nat_size = nat_seg_count * seg_size;
3529
3530         DBG(1, "\tSparse: filling nat area at block offset 0x%08"PRIx64" len: %u\n",
3531                                                         nat_seg_addr, nat_size);
3532         ret = dev_fill(NULL, nat_seg_addr * F2FS_BLKSIZE,
3533                                         nat_size * F2FS_BLKSIZE);
3534         if (ret) {
3535                 MSG(1, "\tError: While zeroing out the nat area "
3536                                 "on disk!!!\n");
3537                 return -1;
3538         }
3539
3540         payload_addr = get_sb(segment0_blkaddr) + 1;
3541
3542         DBG(1, "\tSparse: filling bitmap area at block offset 0x%08"PRIx64" len: %u\n",
3543                                         payload_addr, get_sb(cp_payload));
3544         ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
3545                                         get_sb(cp_payload) * F2FS_BLKSIZE);
3546         if (ret) {
3547                 MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
3548                                 "on disk!!!\n");
3549                 return -1;
3550         }
3551
3552         payload_addr += seg_size;
3553
3554         DBG(1, "\tSparse: filling bitmap area at block offset 0x%08"PRIx64" len: %u\n",
3555                                         payload_addr, get_sb(cp_payload));
3556         ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
3557                                         get_sb(cp_payload) * F2FS_BLKSIZE);
3558         if (ret) {
3559                 MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
3560                                 "on disk!!!\n");
3561                 return -1;
3562         }
3563         return 0;
3564 }
3565 #else
3566 int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi) { return 0; }
3567 #endif