 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}
	if (type)
		ffi->inject_type = type;
	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
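
/*
 * Illustrative sketch (not part of this file): consumers check these
 * attributes via time_to_inject() in f2fs.h, which roughly does the
 * following for a candidate fault of type 'type':
 *
 *	if (ffi->inject_rate && (ffi->inject_type & (1 << type)) &&
 *	    atomic_inc_return(&ffi->inject_ops) >= ffi->inject_rate) {
 *		atomic_set(&ffi->inject_ops, 0);
 *		return true;	<- fail this call on purpose
 *	}
 *
 * i.e. roughly one call out of every inject_rate calls fails, for each
 * fault type whose bit is enabled.
 */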
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
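
/*
 * Note: this shrinker is registered once at module load, in
 * init_f2fs_fs(), via register_shrinker(&f2fs_shrinker_info); under
 * memory pressure it trims per-sb caches (extent nodes, NAT entries,
 * free nids) across all mounted f2fs instances.
 */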
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_err, NULL},
};
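
/*
 * Example (illustrative): a mount like
 *
 *	mount -t f2fs -o background_gc=sync,discard,active_logs=6 /dev/sdb1 /mnt
 *
 * hands parse_options() the string "background_gc=sync,discard,active_logs=6".
 * strsep() splits it at each ',' and match_token() maps the pieces to
 * Opt_gc_background (args[0] = "sync"), Opt_discard, and Opt_active_logs
 * (args[0] = "6").
 */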
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	/* cap root-reserved blocks at 0.2% of the user-visible blocks */
	block_t limit = (sbi->user_block_count << 1) / 1000;

	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
}
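
/*
 * Worked example (illustrative numbers): with a user_block_count of
 * 26214400 4KB blocks (~100 GiB), limit = (26214400 << 1) / 1000 = 52428
 * blocks, i.e. about 200 MiB -- 0.2% of the user-visible space -- so any
 * larger reserve_root= request is clamped to that value.
 */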
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sb)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			"Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_msg(sb, KERN_INFO,
					"Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (strlen(name) == 9 &&
					!strncmp(name, "nobarrier", 9)) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sb)) {
				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mode enabled");
#else
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mount option ignored");
#endif
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Filesystem with quota feature cannot be mounted RDWR "
			"without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Filesystem with project quota feature cannot be "
			"mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		if (!f2fs_sb_has_extra_attr(sb) ||
			!f2fs_sb_has_flexible_inline_xattr(sb)) {
			f2fs_msg(sb, KERN_ERR,
				"extra_attr or flexible_inline_xattr "
				"feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
				"inline_xattr_size option should be "
				"set with inline_xattr option");
			return -EINVAL;
		}
		if (F2FS_OPTION(sbi).inline_xattr_size <
			sizeof(struct f2fs_xattr_header) / sizeof(__le32) ||
			F2FS_OPTION(sbi).inline_xattr_size >
			DEF_ADDRS_PER_INODE -
			F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) -
			DEF_INLINE_RESERVED_SIZE -
			MIN_INLINE_DENTRY_SIZE / sizeof(__le32)) {
			f2fs_msg(sb, KERN_ERR,
				"inline xattr size is out of range");
			return -EINVAL;
		}
	}

	/*
	 * Do not pass down write hints if the number of active logs is
	 * less than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We call f2fs_inode_dirtied() here so that the dirty inode is later
 * written back through write_inode().
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done at umount time, we
	 * need to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_wait_discard_bios(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * The superblock is normally clean, so these entries must be
	 * released here. In addition, an EIO will have skipped the
	 * checkpoint, so this is needed in that case as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* even in the cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages_writeback(sbi);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information if f2fs_write_checkpoint()
	 * above failed with an error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = f2fs_write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
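
/*
 * Example (illustrative numbers): with 4KB blocks, a project block soft
 * limit of 1 GiB (dqb_bsoftlimit, in bytes) clamps buf->f_blocks to
 * 1 GiB >> 12 = 262144 blocks; if the project already uses 600 MiB
 * (curblock = 153600), f_bfree = f_bavail = 262144 - 153600 = 108544.
 */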
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
							struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");
#endif

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi->sb))
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
	else
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}
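
/*
 * Illustrative result: after default_options() on a non-zoned device with
 * xattr and ACL support built in, a plain "mount -t f2fs" behaves like
 * mounting with background_gc=on,discard,no_heap,user_xattr,inline_xattr,
 * acl,inline_data,inline_dentry,flush_merge,extent_cache,mode=adaptive,
 * active_logs=6,lazytime (with alloc_mode=default and fsync_mode=posix).
 */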
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * If both the previous and new state of the filesystem is RO,
	 * skip checking the GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sb)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if the FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue_flush thread if the FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quotas: %d on %d", err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(sb));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_msg(sb, KERN_ERR,
			"Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(F2FS_SB(sb), USRQUOTA),
		test_opt(F2FS_SB(sb), GRPQUOTA),
		test_opt(F2FS_SB(sb), PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_msg(sb, KERN_ERR,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(sb))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_msg(sb, KERN_ERR,
				"Fail to turn off disk quota "
				"(type: %d, err: %d, ret:%d), Please "
				"run fsck to fix it.", type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_NEED_FSCK);
		}
	}
}

static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}

static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi->sb) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static bool f2fs_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.dummy_context	= f2fs_dummy_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= F2FS_NAME_LEN,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
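
/*
 * Worked example (illustrative): with 4KB blocks, ADDRS_PER_BLOCK = 1018
 * and NIDS_PER_BLOCK = 1018, max_file_blocks() returns about
 * 2*1018 + 2*1018^2 + 1018^3 ~= 1.057e9 blocks, i.e. a per-file size
 * limit of roughly 3.94 TiB.
 */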
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
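
/*
 * The checks above enforce the on-disk layout, where each area must end
 * exactly where the next begins:
 *
 *	+----+----+-----+-----+-----+-----------+
 *	| SB | CP | SIT | NAT | SSA | MAIN AREA |
 *	+----+----+-----+-----+-----+-----------+
 *
 * segment0_blkaddr must equal cp_blkaddr, and MAIN may end before the
 * device's last segment, in which case the in-memory segment_count is
 * trimmed to match.
 */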
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			segment_count);
		return 1;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment/section count (%u, %u x %u)",
			segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_msg(sb, KERN_INFO,
			"Small segment_count (%u < %u * %u)",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong segment_count / block_count (%u > %llu)",
			segment_count, le64_to_cpu(raw_super->block_count));
		return 1;
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong secs_per_zone / total_sections (%u, %u)",
			secs_per_zone, total_sections);
		return 1;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_msg(sb, KERN_INFO,
			"Corrupted extension count (%u + %u > %u)",
			le32_to_cpu(raw_super->extension_count),
			raw_super->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_msg(sb, KERN_INFO,
			"Insane cp_payload (%u > %u)",
			le32_to_cpu(raw_super->cp_payload),
			blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong user_block_count: %u", user_block_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segments %u and %u have the same segno: %u",
					i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Data segments %u and %u have the same segno: %u",
					i, j,
					le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segment %u and data segment %u have the same segno: %u",
					i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong bitmap size: sit: %u, nat: %u",
			sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong cp_pack_start_sum: %u",
			cp_pack_start_sum);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
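
/*
 * For illustration: with the common mkfs.f2fs defaults of 4KB blocks
 * and 2MB segments, log_blocks_per_seg is 9, so blocks_per_seg below
 * becomes 1 << 9 = 512. All of the derived geometry here comes from
 * shift counts and counts stored in the raw super block.
 */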
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
}
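
/*
 * The valid-block and valid-inode counts are bumped on nearly every
 * allocation, so they live in percpu counters: updates stay CPU-local
 * and are only folded into a global value when the counter is read.
 */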
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_has_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
								GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES	4096

	zones = f2fs_kzalloc(sbi,
			array_size(F2FS_REPORT_NR_ZONES,
					sizeof(struct blk_zone)),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {
		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					zones, &nr_zones,
					GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif
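
/*
 * For illustration: on a host-managed drive with 256MB zones and 4KB
 * blocks, blocks_per_blkz above is 65536 and log_blocks_per_blkz is
 * 16; blkz_type then caches one type byte per zone so the allocator
 * can tell sequential-write-only zones apart without re-querying the
 * device.
 */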
/*
 * Read the f2fs raw super block.
 * Because there are two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, pass a
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EFSCORRUPTED;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
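
/*
 * Note the write order below: the stale copy is rewritten before the
 * currently valid one, so at least one superblock remains intact if
 * power is lost between the two updates.
 */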
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
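
/*
 * For illustration: in a two-device image where each device holds 1024
 * segments of 512 blocks, device 0 covers blocks
 * [0 .. segment0_blkaddr + 524287] and device 1 starts at
 * end_blk(0) + 1, so the members form one contiguous block address
 * space.
 */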
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
			array_size(max_devices,
					sizeof(struct f2fs_dev_info)),
			GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
		"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}
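
/*
 * Mount-time setup, in order: read and validate the super block, set
 * up in-memory state and per-CPU counters, load the meta inode and a
 * valid checkpoint, scan member devices, bring up the segment, node
 * and GC managers, read the root inode, then run orphan and fsync
 * recovery before the filesystem is opened for writes.
 */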
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sb))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Zoned block device support is not enabled");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;
	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	if (f2fs_sb_has_quota_ino(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi->sb)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;
	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				array_size(n,
					sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}
	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part,
					sectors[STAT_WRITE]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);
	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;
#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err) {
			f2fs_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", err);
			goto free_sysfs;
		}
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not completed by a clean shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_meta;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_meta;
		}
	}
skip_recovery:
	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * start the background GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto free_meta;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;
free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
	 * which falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
#ifdef CONFIG_QUOTA
free_sysfs:
#endif
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
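
/*
 * f2fs_fill_super() above is driven by an ordinary block-device mount;
 * a typical invocation (illustrative) is:
 *
 *	mount -t f2fs -o background_gc=on,discard /dev/sdb1 /mnt/f2fs
 *
 * where the option string arrives at f2fs_fill_super() as 'data'.
 */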
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
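
/*
 * Module init registers caches and interfaces in dependency order and,
 * on failure, unwinds through the labels below so a partly initialized
 * module never stays registered.
 */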
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	return 0;

free_root_stats:
	f2fs_destroy_root_stats();
free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
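
/* Module exit tears everything down in the reverse of init order. */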
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");