1 /* SPDX-License-Identifier: GPL-2.0 */
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
11 #include <linux/uio.h>
12 #include <linux/types.h>
13 #include <linux/page-flags.h>
14 #include <linux/buffer_head.h>
15 #include <linux/slab.h>
16 #include <linux/crc32.h>
17 #include <linux/magic.h>
18 #include <linux/kobject.h>
19 #include <linux/sched.h>
20 #include <linux/cred.h>
21 #include <linux/sched/mm.h>
22 #include <linux/vmalloc.h>
23 #include <linux/bio.h>
24 #include <linux/blkdev.h>
25 #include <linux/quotaops.h>
26 #include <linux/part_stat.h>
27 #include <crypto/hash.h>
29 #include <linux/fscrypt.h>
30 #include <linux/fsverity.h>
34 #ifdef CONFIG_F2FS_CHECK_FS
35 #define f2fs_bug_on(sbi, condition) BUG_ON(condition)
37 #define f2fs_bug_on(sbi, condition) \
39 if (WARN_ON(condition)) \
40 set_sbi_flag(sbi, SBI_NEED_FSCK); \
49 	FAULT_ALLOC_BIO,	/* obsolete, since bio_alloc() can no longer fail */
67 #ifdef CONFIG_F2FS_FAULT_INJECTION
68 #define F2FS_ALL_FAULT_TYPE (GENMASK(FAULT_MAX - 1, 0))
70 struct f2fs_fault_info {
72 unsigned int inject_rate;
73 unsigned int inject_type;
76 extern const char *f2fs_fault_name[FAULT_MAX];
77 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
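/*
 * Illustrative sketch (not part of the original header): before injecting an
 * error, f2fs consults both the configured type mask and the injection rate.
 * IS_FAULT_SET() answers only the type-mask half of that check, roughly:
 *
 *	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 *
 *	if (ffi->inject_rate && IS_FAULT_SET(ffi, FAULT_KMALLOC))
 *		return NULL;	// pretend the allocation failed
 */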
83 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000001
84 #define F2FS_MOUNT_DISCARD 0x00000002
85 #define F2FS_MOUNT_NOHEAP 0x00000004
86 #define F2FS_MOUNT_XATTR_USER 0x00000008
87 #define F2FS_MOUNT_POSIX_ACL 0x00000010
88 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000020
89 #define F2FS_MOUNT_INLINE_XATTR 0x00000040
90 #define F2FS_MOUNT_INLINE_DATA 0x00000080
91 #define F2FS_MOUNT_INLINE_DENTRY 0x00000100
92 #define F2FS_MOUNT_FLUSH_MERGE 0x00000200
93 #define F2FS_MOUNT_NOBARRIER 0x00000400
94 #define F2FS_MOUNT_FASTBOOT 0x00000800
95 #define F2FS_MOUNT_READ_EXTENT_CACHE 0x00001000
96 #define F2FS_MOUNT_DATA_FLUSH 0x00002000
97 #define F2FS_MOUNT_FAULT_INJECTION 0x00004000
98 #define F2FS_MOUNT_USRQUOTA 0x00008000
99 #define F2FS_MOUNT_GRPQUOTA 0x00010000
100 #define F2FS_MOUNT_PRJQUOTA 0x00020000
101 #define F2FS_MOUNT_QUOTA 0x00040000
102 #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00080000
103 #define F2FS_MOUNT_RESERVE_ROOT 0x00100000
104 #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x00200000
105 #define F2FS_MOUNT_NORECOVERY 0x00400000
106 #define F2FS_MOUNT_ATGC 0x00800000
107 #define F2FS_MOUNT_MERGE_CHECKPOINT 0x01000000
108 #define F2FS_MOUNT_GC_MERGE 0x02000000
109 #define F2FS_MOUNT_COMPRESS_CACHE 0x04000000
110 #define F2FS_MOUNT_AGE_EXTENT_CACHE 0x08000000
112 #define F2FS_OPTION(sbi) ((sbi)->mount_opt)
113 #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
114 #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
115 #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
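/*
 * Illustrative example (not part of the original header): these helpers are
 * plain bit operations on the mount option word, using the F2FS_MOUNT_*
 * masks above, e.g.
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD) && !test_opt(sbi, NOBARRIER))
 *		issue_discards(sbi);	// hypothetical caller
 *	clear_opt(sbi, DISCARD);
 */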
117 #define ver_after(a, b) (typecheck(unsigned long long, a) && \
118 typecheck(unsigned long long, b) && \
119 ((long long)((a) - (b)) > 0))
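/*
 * ver_after(a, b) is true when checkpoint version @a is newer than @b.
 * Casting the unsigned difference to signed keeps the comparison correct
 * even across a wrap of the 64-bit version counter; for instance (an
 * illustrative case) ver_after(1ULL, ULLONG_MAX) is true because
 * (long long)(1 - ULLONG_MAX) == 2 > 0.
 */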
121 typedef u32 block_t; /*
122 * should not change u32, since it is the on-disk block
123 * address format, __le32.
127 #define COMPRESS_EXT_NUM 16
130 * An implementation of an rwsem that is explicitly unfair to readers. This
131 * prevents priority inversion when a low-priority reader acquires the read lock
132  * while sleeping on the write lock, even though the write lock is needed by
133  * higher-priority clients.
137 struct rw_semaphore internal_rwsem;
138 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
139 wait_queue_head_t read_waiters;
143 struct f2fs_mount_info {
145 int write_io_size_bits; /* Write IO size bits */
146 block_t root_reserved_blocks; /* root reserved blocks */
147 kuid_t s_resuid; /* reserved blocks for uid */
148 kgid_t s_resgid; /* reserved blocks for gid */
149 int active_logs; /* # of active logs */
150 int inline_xattr_size; /* inline xattr size */
151 #ifdef CONFIG_F2FS_FAULT_INJECTION
152 struct f2fs_fault_info fault_info; /* For fault injection */
155 /* Names of quota files with journalled quota */
156 char *s_qf_names[MAXQUOTAS];
157 int s_jquota_fmt; /* Format of quota to use */
159 /* For which write hints are passed down to block layer */
160 int alloc_mode; /* segment allocation policy */
161 int fsync_mode; /* fsync policy */
162 int fs_mode; /* fs mode: LFS or ADAPTIVE */
163 int bggc_mode; /* bggc mode: off, on or sync */
164 int memory_mode; /* memory mode */
165 int errors; /* errors parameter */
167 * discard command's offset/size should
168 * be aligned to this unit: block,
171 struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
172 block_t unusable_cap_perc; /* percentage for cap */
173 block_t unusable_cap; /* Amount of space allowed to be
174 * unusable when disabling checkpoint
177 /* For compression */
178 unsigned char compress_algorithm; /* algorithm type */
179 unsigned char compress_log_size; /* cluster log size */
180 unsigned char compress_level; /* compress level */
181 bool compress_chksum; /* compressed data chksum */
182 unsigned char compress_ext_cnt; /* extension count */
183 unsigned char nocompress_ext_cnt; /* nocompress extension count */
184 int compress_mode; /* compression mode */
185 unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
186 unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
189 #define F2FS_FEATURE_ENCRYPT 0x00000001
190 #define F2FS_FEATURE_BLKZONED 0x00000002
191 #define F2FS_FEATURE_ATOMIC_WRITE 0x00000004
192 #define F2FS_FEATURE_EXTRA_ATTR 0x00000008
193 #define F2FS_FEATURE_PRJQUOTA 0x00000010
194 #define F2FS_FEATURE_INODE_CHKSUM 0x00000020
195 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x00000040
196 #define F2FS_FEATURE_QUOTA_INO 0x00000080
197 #define F2FS_FEATURE_INODE_CRTIME 0x00000100
198 #define F2FS_FEATURE_LOST_FOUND 0x00000200
199 #define F2FS_FEATURE_VERITY 0x00000400
200 #define F2FS_FEATURE_SB_CHKSUM 0x00000800
201 #define F2FS_FEATURE_CASEFOLD 0x00001000
202 #define F2FS_FEATURE_COMPRESSION 0x00002000
203 #define F2FS_FEATURE_RO 0x00004000
205 #define __F2FS_HAS_FEATURE(raw_super, mask) \
206 ((raw_super->feature & cpu_to_le32(mask)) != 0)
207 #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask)
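/*
 * Illustrative example (not part of the original header): feature bits live
 * little-endian in the raw superblock, so callers test them through the
 * helper above, e.g.
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		return true;	// image was formatted with compression support
 */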
210 * Default values for user and/or group using reserved blocks
212 #define F2FS_DEF_RESUID 0
213 #define F2FS_DEF_RESGID 0
216 * For checkpoint manager
223 #define CP_UMOUNT 0x00000001
224 #define CP_FASTBOOT 0x00000002
225 #define CP_SYNC 0x00000004
226 #define CP_RECOVERY 0x00000008
227 #define CP_DISCARD 0x00000010
228 #define CP_TRIMMED 0x00000020
229 #define CP_PAUSE 0x00000040
230 #define CP_RESIZE 0x00000080
232 #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
233 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if candidates exist */
234 #define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
235 #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
236 #define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */
237 #define DEF_CP_INTERVAL 60 /* 60 secs */
238 #define DEF_IDLE_INTERVAL 5 /* 5 secs */
239 #define DEF_DISABLE_INTERVAL 5 /* 5 secs */
240 #define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
241 #define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */
251 * indicate meta/data type
260 DATA_GENERIC, /* check range only */
261 DATA_GENERIC_ENHANCE, /* strong check on range and segment bitmap */
262 DATA_GENERIC_ENHANCE_READ, /*
263 * strong check on range and segment
264 * bitmap but no warning due to race
265 * condition of read on truncated area
268 DATA_GENERIC_ENHANCE_UPDATE, /*
269 * strong check on range and segment
270 * bitmap for update case
275 /* for the list of ino */
277 ORPHAN_INO, /* for orphan ino list */
278 APPEND_INO, /* for append ino list */
279 UPDATE_INO, /* for update ino list */
280 TRANS_DIR_INO, /* for transactions dir ino list */
281 FLUSH_INO, /* for multiple device flushing */
282 MAX_INO_ENTRY, /* max. list */
286 struct list_head list; /* list head */
287 nid_t ino; /* inode number */
288 unsigned int dirty_device; /* dirty device bitmap */
291 /* for the list of inodes to be GCed */
293 struct list_head list; /* list head */
294 struct inode *inode; /* vfs inode pointer */
297 struct fsync_node_entry {
298 struct list_head list; /* list head */
299 struct page *page; /* warm node page pointer */
300 unsigned int seq_id; /* sequence id */
304 struct completion wait; /* completion for checkpoint done */
305 struct llist_node llnode; /* llist_node to be linked in wait queue */
306 int ret; /* return code of checkpoint */
307 ktime_t queue_time; /* request queued time */
310 struct ckpt_req_control {
311 struct task_struct *f2fs_issue_ckpt; /* checkpoint task */
312 int ckpt_thread_ioprio; /* checkpoint merge thread ioprio */
313 wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */
314 atomic_t issued_ckpt; /* # of actually issued ckpts */
315 atomic_t total_ckpt; /* # of total ckpts */
316 atomic_t queued_ckpt; /* # of queued ckpts */
317 struct llist_head issue_list; /* list for command issue */
318 spinlock_t stat_lock; /* lock for below checkpoint time stats */
319 unsigned int cur_time; /* cur wait time in msec for currently issued checkpoint */
320 unsigned int peak_time; /* peak wait time in msec until now */
323 /* for the bitmap indicate blocks to be discarded */
324 struct discard_entry {
325 struct list_head list; /* list head */
326 block_t start_blkaddr; /* start blockaddr of current segment */
327 unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */
330 /* minimum discard granularity, unit: block count */
331 #define MIN_DISCARD_GRANULARITY 1
332 /* default discard granularity of inner discard thread, unit: block count */
333 #define DEFAULT_DISCARD_GRANULARITY 16
334 /* default maximum discard granularity of ordered discard, unit: block count */
335 #define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY 16
337 /* max discard pend list number */
338 #define MAX_PLIST_NUM 512
339 #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \
340 (MAX_PLIST_NUM - 1) : ((blk_num) - 1))
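/*
 * Worked example (illustrative): pending discards are binned by block count,
 * so plist_idx(1) == 0, plist_idx(16) == 15, and any request of
 * MAX_PLIST_NUM blocks or more (e.g. plist_idx(4096)) lands in the last
 * list, index MAX_PLIST_NUM - 1 == 511.
 */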
343 D_PREP, /* initial */
344 D_PARTIAL, /* partially submitted */
345 D_SUBMIT, /* all submitted */
346 D_DONE, /* finished */
349 struct discard_info {
350 block_t lstart; /* logical start address */
351 block_t len; /* length */
352 block_t start; /* actual start address in dev */
356 struct rb_node rb_node; /* rb node located in rb-tree */
357 struct discard_info di; /* discard info */
358 struct list_head list; /* command list */
359 	struct completion wait;		/* completion */
360 struct block_device *bdev; /* bdev */
361 unsigned short ref; /* reference count */
362 unsigned char state; /* state */
363 unsigned char queued; /* queued discard */
364 int error; /* bio error */
365 spinlock_t lock; /* for state/bio_ref updating */
366 unsigned short bio_ref; /* bio reference count */
377 struct discard_policy {
378 int type; /* type of discard */
379 unsigned int min_interval; /* used for candidates exist */
380 unsigned int mid_interval; /* used for device busy */
381 unsigned int max_interval; /* used for candidates not exist */
382 unsigned int max_requests; /* # of discards issued per round */
383 	unsigned int io_aware_gran;	/* minimum granularity at which discards ignore I/O activity */
384 bool io_aware; /* issue discard in idle time */
385 bool sync; /* submit discard with REQ_SYNC flag */
386 bool ordered; /* issue discard by lba order */
387 bool timeout; /* discard timeout for put_super */
388 unsigned int granularity; /* discard granularity */
391 struct discard_cmd_control {
392 struct task_struct *f2fs_issue_discard; /* discard thread */
393 struct list_head entry_list; /* 4KB discard entry list */
394 struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
395 struct list_head wait_list; /* store on-flushing entries */
396 struct list_head fstrim_list; /* in-flight discard from fstrim */
397 wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
398 struct mutex cmd_lock;
399 unsigned int nr_discards; /* # of discards in the list */
400 unsigned int max_discards; /* max. discards to be issued */
401 unsigned int max_discard_request; /* max. discard request per round */
402 unsigned int min_discard_issue_time; /* min. interval between discard issue */
403 unsigned int mid_discard_issue_time; /* mid. interval between discard issue */
404 unsigned int max_discard_issue_time; /* max. interval between discard issue */
405 	unsigned int discard_io_aware_gran; /* minimum discard granularity that ignores I/O activity */
406 unsigned int discard_urgent_util; /* utilization which issue discard proactively */
407 unsigned int discard_granularity; /* discard granularity */
408 unsigned int max_ordered_discard; /* maximum discard granularity issued by lba order */
409 unsigned int undiscard_blks; /* # of undiscard blocks */
410 unsigned int next_pos; /* next discard position */
411 atomic_t issued_discard; /* # of issued discard */
412 atomic_t queued_discard; /* # of queued discard */
413 atomic_t discard_cmd_cnt; /* # of cached cmd count */
414 struct rb_root_cached root; /* root of discard rb-tree */
415 	bool rbtree_check;			/* config for consistency check */
416 bool discard_wake; /* to wake up discard thread */
419 /* for the list of fsync inodes, used only during recovery */
420 struct fsync_inode_entry {
421 struct list_head list; /* list head */
422 struct inode *inode; /* vfs inode pointer */
423 block_t blkaddr; /* block address locating the last fsync */
424 block_t last_dentry; /* block address locating the last dentry */
427 #define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats))
428 #define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits))
430 #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne)
431 #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid)
432 #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se)
433 #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno)
435 #define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
436 #define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
438 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
440 int before = nats_in_cursum(journal);
442 journal->n_nats = cpu_to_le16(before + i);
446 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
448 int before = sits_in_cursum(journal);
450 journal->n_sits = cpu_to_le16(before + i);
454 static inline bool __has_cursum_space(struct f2fs_journal *journal,
457 if (type == NAT_JOURNAL)
458 return size <= MAX_NAT_JENTRIES(journal);
459 return size <= MAX_SIT_JENTRIES(journal);
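/*
 * Usage sketch (hypothetical caller, not from the original source): before
 * adding @cnt NAT entries to the in-journal summary, check the remaining
 * room and fall back to writing the NAT blocks directly when it is full:
 *
 *	if (__has_cursum_space(journal, cnt, NAT_JOURNAL))
 *		idx = update_nats_in_cursum(journal, cnt);
 *	else
 *		flush_nat_entries_to_blocks(sbi);	// hypothetical fallback
 */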
462 /* for inline stuff */
463 #define DEF_INLINE_RESERVED_SIZE 1
464 static inline int get_extra_isize(struct inode *inode);
465 static inline int get_inline_xattr_addrs(struct inode *inode);
466 #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
467 (CUR_ADDRS_PER_INODE(inode) - \
468 get_inline_xattr_addrs(inode) - \
469 DEF_INLINE_RESERVED_SIZE))
472 #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
473 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
475 #define INLINE_DENTRY_BITMAP_SIZE(inode) \
476 DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
477 #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \
478 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
479 NR_INLINE_DENTRY(inode) + \
480 INLINE_DENTRY_BITMAP_SIZE(inode)))
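/*
 * Worked example (assuming the default 4KB block layout: 923 block address
 * slots per inode, a 50-slot inline xattr area and DEF_INLINE_RESERVED_SIZE
 * of 1): MAX_INLINE_DATA() is 4 * (923 - 50 - 1) = 3488 bytes.  Each inline
 * dentry costs SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN = 19 bytes plus one bitmap
 * bit, so NR_INLINE_DENTRY() works out to 3488 * 8 / 153 = 182 entries.
 */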
483 * For INODE and NODE manager
485 /* for directory operations */
487 struct f2fs_filename {
489 * The filename the user specified. This is NULL for some
490 * filesystem-internal operations, e.g. converting an inline directory
491 * to a non-inline one, or roll-forward recovering an encrypted dentry.
493 const struct qstr *usr_fname;
496 * The on-disk filename. For encrypted directories, this is encrypted.
497 * This may be NULL for lookups in an encrypted dir without the key.
499 struct fscrypt_str disk_name;
501 /* The dirhash of this filename */
504 #ifdef CONFIG_FS_ENCRYPTION
506 * For lookups in encrypted directories: either the buffer backing
507 * disk_name, or a buffer that holds the decoded no-key name.
509 struct fscrypt_str crypto_buf;
511 #if IS_ENABLED(CONFIG_UNICODE)
513 * For casefolded directories: the casefolded name, but it's left NULL
514 * if the original name is not valid Unicode, if the original name is
515 * "." or "..", if the directory is both casefolded and encrypted and
516 * its encryption key is unavailable, or if the filesystem is doing an
517 * internal operation where usr_fname is also NULL. In all these cases
518 * we fall back to treating the name as an opaque byte sequence.
520 struct fscrypt_str cf_name;
524 struct f2fs_dentry_ptr {
527 struct f2fs_dir_entry *dentry;
528 __u8 (*filename)[F2FS_SLOT_LEN];
533 static inline void make_dentry_ptr_block(struct inode *inode,
534 struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
537 d->max = NR_DENTRY_IN_BLOCK;
538 d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
539 d->bitmap = t->dentry_bitmap;
540 d->dentry = t->dentry;
541 d->filename = t->filename;
544 static inline void make_dentry_ptr_inline(struct inode *inode,
545 struct f2fs_dentry_ptr *d, void *t)
547 int entry_cnt = NR_INLINE_DENTRY(inode);
548 int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
549 int reserved_size = INLINE_RESERVED_SIZE(inode);
553 d->nr_bitmap = bitmap_size;
555 d->dentry = t + bitmap_size + reserved_size;
556 d->filename = t + bitmap_size + reserved_size +
557 SIZE_OF_DIR_ENTRY * entry_cnt;
561  * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
562  * as its node offset to distinguish it from index node blocks.
563  * Note that some bits are used to mark the node block.
565 #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
568 ALLOC_NODE, /* allocate a new node page if needed */
569 LOOKUP_NODE, /* look up a node without readahead */
571 * look up a node with readahead called
576 #define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO or flush count */
578 /* congestion wait timeout value, default: 20ms */
579 #define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20))
581 /* maximum retry quota flush count */
582 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
584 /* maximum retry of EIO'ed page */
585 #define MAX_RETRY_PAGE_EIO 100
587 #define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
589 #define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
591 /* dirty segments threshold for triggering CP */
592 #define DEFAULT_DIRTY_THRESHOLD 4
594 #define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS
595 #define RECOVERY_MIN_RA_BLOCKS 1
597 #define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */
599 /* for in-memory extent cache entry */
600 #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
602 /* number of extent info in extent cache we try to shrink */
603 #define READ_EXTENT_CACHE_SHRINK_NUMBER 128
605 /* number of age extent info in extent cache we try to shrink */
606 #define AGE_EXTENT_CACHE_SHRINK_NUMBER 128
607 #define LAST_AGE_WEIGHT 30
608 #define SAME_AGE_REGION 1024
611 * Define data block with age less than 1GB as hot data
612 * define data block with age less than 10GB but more than 1GB as warm data
614 #define DEF_HOT_DATA_AGE_THRESHOLD 262144
615 #define DEF_WARM_DATA_AGE_THRESHOLD 2621440
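/*
 * Illustrative arithmetic: with 4KB blocks, 262144 blocks correspond to 1GB
 * of allocated data and 2621440 blocks to 10GB, matching the hot/warm split
 * described above.
 */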
617 /* extent cache type */
625 unsigned int fofs; /* start offset in a file */
626 unsigned int len; /* length of the extent */
628 /* read extent_cache */
630 /* start block address of the extent */
632 #ifdef CONFIG_F2FS_FS_COMPRESSION
633 /* physical extent length of compressed blocks */
637 /* block age extent_cache */
639 /* block age of the extent */
640 unsigned long long age;
641 /* last total blocks allocated */
642 unsigned long long last_blocks;
648 struct rb_node rb_node; /* rb node located in rb-tree */
649 struct extent_info ei; /* extent info */
650 struct list_head list; /* node in global extent list of sbi */
651 struct extent_tree *et; /* extent tree pointer */
655 nid_t ino; /* inode number */
656 enum extent_type type; /* keep the extent tree type */
657 struct rb_root_cached root; /* root of extent info rb-tree */
658 struct extent_node *cached_en; /* recently accessed extent node */
659 struct list_head list; /* to be used by sbi->zombie_list */
660 rwlock_t lock; /* protect extent info rb-tree */
661 atomic_t node_cnt; /* # of extent node in rb-tree*/
662 bool largest_updated; /* largest extent updated */
663 struct extent_info largest; /* largest cached extent for EX_READ */
666 struct extent_tree_info {
667 struct radix_tree_root extent_tree_root;/* cache extent cache entries */
668 struct mutex extent_tree_lock; /* locking extent radix tree */
669 struct list_head extent_list; /* lru list for shrinker */
670 spinlock_t extent_lock; /* locking extent lru list */
671 atomic_t total_ext_tree; /* extent tree count */
672 struct list_head zombie_list; /* extent zombie tree list */
673 atomic_t total_zombie_tree; /* extent zombie tree count */
674 atomic_t total_ext_node; /* extent info count */
678 * State of block returned by f2fs_map_blocks.
680 #define F2FS_MAP_NEW (1U << 0)
681 #define F2FS_MAP_MAPPED (1U << 1)
682 #define F2FS_MAP_DELALLOC (1U << 2)
683 #define F2FS_MAP_FLAGS (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
686 struct f2fs_map_blocks {
687 struct block_device *m_bdev; /* for multi-device dio */
691 unsigned int m_flags;
692 pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
693 pgoff_t *m_next_extent; /* point to next possible extent */
695 bool m_may_create; /* indicate it is from write path */
696 bool m_multidev_dio; /* indicate it allows multi-device dio */
699 /* for flag in get_data_block */
701 F2FS_GET_BLOCK_DEFAULT,
702 F2FS_GET_BLOCK_FIEMAP,
705 F2FS_GET_BLOCK_PRE_DIO,
706 F2FS_GET_BLOCK_PRE_AIO,
707 F2FS_GET_BLOCK_PRECACHE,
711 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
713 #define FADVISE_COLD_BIT 0x01
714 #define FADVISE_LOST_PINO_BIT 0x02
715 #define FADVISE_ENCRYPT_BIT 0x04
716 #define FADVISE_ENC_NAME_BIT 0x08
717 #define FADVISE_KEEP_SIZE_BIT 0x10
718 #define FADVISE_HOT_BIT 0x20
719 #define FADVISE_VERITY_BIT 0x40
720 #define FADVISE_TRUNC_BIT 0x80
722 #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
724 #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
725 #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
726 #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT)
728 #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
729 #define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT)
730 #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT)
732 #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT)
733 #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT)
735 #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT)
736 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
738 #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
739 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
741 #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT)
742 #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT)
743 #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT)
745 #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT)
746 #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT)
748 #define file_should_truncate(inode) is_file(inode, FADVISE_TRUNC_BIT)
749 #define file_need_truncate(inode) set_file(inode, FADVISE_TRUNC_BIT)
750 #define file_dont_truncate(inode) clear_file(inode, FADVISE_TRUNC_BIT)
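/*
 * Illustrative example (not part of the original header): the helpers above
 * read and update hint bits in i_advise, e.g. marking a file hot so its data
 * is steered to the hot data log:
 *
 *	if (!file_is_hot(inode))
 *		file_set_hot(inode);
 */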
752 #define DEF_DIR_LEVEL 0
759 /* used for f2fs_inode_info->flags */
761 FI_NEW_INODE, /* indicate newly allocated inode */
762 FI_DIRTY_INODE, /* indicate inode is dirty or not */
763 FI_AUTO_RECOVER, /* indicate inode is recoverable */
764 FI_DIRTY_DIR, /* indicate directory has dirty pages */
765 FI_INC_LINK, /* need to increment i_nlink */
766 FI_ACL_MODE, /* indicate acl mode */
767 FI_NO_ALLOC, /* should not allocate any blocks */
768 	FI_FREE_NID,		/* free allocated nid */
769 FI_NO_EXTENT, /* not to use the extent cache */
770 FI_INLINE_XATTR, /* used for inline xattr */
771 FI_INLINE_DATA, /* used for inline data*/
772 FI_INLINE_DENTRY, /* used for inline dentry */
773 FI_APPEND_WRITE, /* inode has appended data */
774 FI_UPDATE_WRITE, /* inode has in-place-update data */
775 FI_NEED_IPU, /* used for ipu per file */
776 FI_ATOMIC_FILE, /* indicate atomic file */
777 FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
778 FI_DROP_CACHE, /* drop dirty page cache */
779 FI_DATA_EXIST, /* indicate data exists */
780 FI_INLINE_DOTS, /* indicate inline dot dentries */
781 FI_SKIP_WRITES, /* should skip data page writeback */
782 FI_OPU_WRITE, /* used for opu per file */
783 FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
784 FI_PREALLOCATED_ALL, /* all blocks for write were preallocated */
785 FI_HOT_DATA, /* indicate file is hot */
786 FI_EXTRA_ATTR, /* indicate file has extra attribute */
787 FI_PROJ_INHERIT, /* indicate file inherits projectid */
788 FI_PIN_FILE, /* indicate file should not be gced */
789 FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
790 FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
791 FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */
792 FI_MMAP_FILE, /* indicate file was mmapped */
793 FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */
794 FI_COMPRESS_RELEASED, /* compressed blocks were released */
795 FI_ALIGNED_WRITE, /* enable aligned write */
796 FI_COW_FILE, /* indicate COW file */
797 FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
798 FI_ATOMIC_REPLACE, /* indicate atomic replace */
799 FI_MAX, /* max flag, never be used */
802 struct f2fs_inode_info {
803 struct inode vfs_inode; /* serve a vfs inode */
804 unsigned long i_flags; /* keep an inode flags for ioctl */
805 unsigned char i_advise; /* use to give file attribute hints */
806 unsigned char i_dir_level; /* use for dentry level for large dir */
807 unsigned int i_current_depth; /* only for directory depth */
808 /* for gc failure statistic */
809 unsigned int i_gc_failures[MAX_GC_FAILURE];
810 unsigned int i_pino; /* parent inode number */
811 umode_t i_acl_mode; /* keep file acl mode temporarily */
813 	/* The fields below are used internally by f2fs */
814 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
815 struct f2fs_rwsem i_sem; /* protect fi info */
816 atomic_t dirty_pages; /* # of dirty pages */
817 f2fs_hash_t chash; /* hash value of given file name */
818 unsigned int clevel; /* maximum level of given file name */
819 struct task_struct *task; /* lookup and create consistency */
820 struct task_struct *cp_task; /* separate cp/wb IO stats*/
821 struct task_struct *wb_task; /* indicate inode is in context of writeback */
822 nid_t i_xattr_nid; /* node id that contains xattrs */
823 loff_t last_disk_size; /* lastly written file size */
824 spinlock_t i_size_lock; /* protect last_disk_size */
827 struct dquot *i_dquot[MAXQUOTAS];
829 /* quota space reservation, managed internally by quota code */
830 qsize_t i_reserved_quota;
832 struct list_head dirty_list; /* dirty list for dirs and files */
833 struct list_head gdirty_list; /* linked in global dirty list */
834 struct task_struct *atomic_write_task; /* store atomic write task */
835 struct extent_tree *extent_tree[NR_EXTENT_CACHES];
836 /* cached extent_tree entry */
837 struct inode *cow_inode; /* copy-on-write inode for atomic write */
839 /* avoid racing between foreground op and gc */
840 struct f2fs_rwsem i_gc_rwsem[2];
841 struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */
843 int i_extra_isize; /* size of extra space located in i_addr */
844 kprojid_t i_projid; /* id for project quota */
845 int i_inline_xattr_size; /* inline xattr size */
846 struct timespec64 i_crtime; /* inode creation time */
847 struct timespec64 i_disk_time[3];/* inode disk times */
849 /* for file compress */
850 atomic_t i_compr_blocks; /* # of compressed blocks */
851 unsigned char i_compress_algorithm; /* algorithm type */
852 unsigned char i_log_cluster_size; /* log of cluster size */
853 unsigned char i_compress_level; /* compress level (lz4hc,zstd) */
854 unsigned char i_compress_flag; /* compress flag */
855 unsigned int i_cluster_size; /* cluster size */
857 unsigned int atomic_write_cnt;
858 loff_t original_i_size; /* original i_size before atomic write */
861 static inline void get_read_extent_info(struct extent_info *ext,
862 struct f2fs_extent *i_ext)
864 ext->fofs = le32_to_cpu(i_ext->fofs);
865 ext->blk = le32_to_cpu(i_ext->blk);
866 ext->len = le32_to_cpu(i_ext->len);
869 static inline void set_raw_read_extent(struct extent_info *ext,
870 struct f2fs_extent *i_ext)
872 i_ext->fofs = cpu_to_le32(ext->fofs);
873 i_ext->blk = cpu_to_le32(ext->blk);
874 i_ext->len = cpu_to_le32(ext->len);
877 static inline bool __is_discard_mergeable(struct discard_info *back,
878 struct discard_info *front, unsigned int max_len)
880 return (back->lstart + back->len == front->lstart) &&
881 (back->len + front->len <= max_len);
884 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
885 struct discard_info *back, unsigned int max_len)
887 return __is_discard_mergeable(back, cur, max_len);
890 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
891 struct discard_info *front, unsigned int max_len)
893 return __is_discard_mergeable(cur, front, max_len);
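/*
 * Illustrative example: two discard_info ranges are mergeable only when they
 * are logically adjacent and the merged length stays within @max_len, e.g.
 * back = { .lstart = 100, .len = 10 } and front = { .lstart = 110, .len = 6 }
 * can be merged into one 16-block discard as long as max_len >= 16.
 */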
897 * For free nid management
900 FREE_NID, /* newly added to free nid list */
901 PREALLOC_NID, /* it is preallocated */
912 struct f2fs_nm_info {
913 block_t nat_blkaddr; /* base disk address of NAT */
914 nid_t max_nid; /* maximum possible node ids */
915 nid_t available_nids; /* # of available node ids */
916 nid_t next_scan_nid; /* the next nid to be scanned */
917 nid_t max_rf_node_blocks; /* max # of nodes for recovery */
918 unsigned int ram_thresh; /* control the memory footprint */
919 unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
920 unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */
922 /* NAT cache management */
923 struct radix_tree_root nat_root;/* root of the nat entry cache */
924 struct radix_tree_root nat_set_root;/* root of the nat set cache */
925 struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */
926 struct list_head nat_entries; /* cached nat entry list (clean) */
927 spinlock_t nat_list_lock; /* protect clean nat entry list */
928 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
929 unsigned int nat_blocks; /* # of nat blocks */
931 /* free node ids management */
932 struct radix_tree_root free_nid_root;/* root of the free_nid cache */
933 struct list_head free_nid_list; /* list for free nids excluding preallocated nids */
934 unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */
935 spinlock_t nid_list_lock; /* protect nid lists ops */
936 struct mutex build_lock; /* lock for build free nids */
937 unsigned char **free_nid_bitmap;
938 unsigned char *nat_block_bitmap;
939 unsigned short *free_nid_count; /* free nid count of NAT block */
942 char *nat_bitmap; /* NAT bitmap pointer */
944 unsigned int nat_bits_blocks; /* # of nat bits blocks */
945 unsigned char *nat_bits; /* NAT bits blocks */
946 unsigned char *full_nat_bits; /* full NAT pages */
947 unsigned char *empty_nat_bits; /* empty NAT pages */
948 #ifdef CONFIG_F2FS_CHECK_FS
949 char *nat_bitmap_mir; /* NAT bitmap mirror */
951 int bitmap_size; /* bitmap size */
955  * this structure is used as a function parameter.
956  * all of the information is dedicated to a given direct node block determined
957  * by the data offset in a file.
959 struct dnode_of_data {
960 struct inode *inode; /* vfs inode pointer */
961 struct page *inode_page; /* its inode page, NULL is possible */
962 struct page *node_page; /* cached direct node page */
963 nid_t nid; /* node id of the direct node block */
964 unsigned int ofs_in_node; /* data offset in the node page */
965 bool inode_page_locked; /* inode page is locked or not */
966 bool node_changed; /* is node block changed */
967 char cur_level; /* level of hole node page */
968 char max_level; /* level of current page located */
969 block_t data_blkaddr; /* block address of the node block */
972 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
973 struct page *ipage, struct page *npage, nid_t nid)
975 memset(dn, 0, sizeof(*dn));
977 dn->inode_page = ipage;
978 dn->node_page = npage;
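/*
 * Typical usage sketch (hypothetical caller, not part of this header):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = f2fs_data_blkaddr(&dn);
 *		f2fs_put_dnode(&dn);
 *	}
 */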
985 * By default, there are 6 active log areas across the whole main area.
986 * When considering hot and cold data separation to reduce cleaning overhead,
987 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
989  * In the current design, you should not change these numbers.
990  * Instead, the active_logs=x mount option lets you select 2, 4, or 6 logs
991  * according to the underlying devices. (default: 6)
992  * Just in case, the on-disk layout covers a maximum of 16 logs, consisting of
993  * 8 for data and 8 for node logs.
995 #define NR_CURSEG_DATA_TYPE (3)
996 #define NR_CURSEG_NODE_TYPE (3)
997 #define NR_CURSEG_INMEM_TYPE (2)
998 #define NR_CURSEG_RO_TYPE (2)
999 #define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
1000 #define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
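/*
 * With the values above, NR_CURSEG_PERSIST_TYPE is 3 + 3 = 6 on-disk logs
 * and NR_CURSEG_TYPE is 2 + 6 = 8; the two extra slots are the in-memory
 * only CURSEG_COLD_DATA_PINNED and CURSEG_ALL_DATA_ATGC logs listed below.
 */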
1003 CURSEG_HOT_DATA = 0, /* directory entry blocks */
1004 CURSEG_WARM_DATA, /* data blocks */
1005 CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
1006 CURSEG_HOT_NODE, /* direct node blocks of directory files */
1007 CURSEG_WARM_NODE, /* direct node blocks of normal files */
1008 CURSEG_COLD_NODE, /* indirect node blocks */
1009 NR_PERSISTENT_LOG, /* number of persistent log */
1010 CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
1011 /* pinned file that needs consecutive block address */
1012 	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
1013 NO_CHECK_TYPE, /* number of persistent & inmem log */
1017 struct completion wait;
1018 struct llist_node llnode;
1023 struct flush_cmd_control {
1024 struct task_struct *f2fs_issue_flush; /* flush thread */
1025 wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
1026 atomic_t issued_flush; /* # of issued flushes */
1027 atomic_t queued_flush; /* # of queued flushes */
1028 struct llist_head issue_list; /* list for command issue */
1029 struct llist_node *dispatch_list; /* list for command dispatch */
1032 struct f2fs_sm_info {
1033 struct sit_info *sit_info; /* whole segment information */
1034 struct free_segmap_info *free_info; /* free segment information */
1035 struct dirty_seglist_info *dirty_info; /* dirty segment information */
1036 struct curseg_info *curseg_array; /* active segment information */
1038 struct f2fs_rwsem curseg_lock; /* for preventing curseg change */
1040 block_t seg0_blkaddr; /* block address of 0'th segment */
1041 block_t main_blkaddr; /* start block address of main area */
1042 block_t ssa_blkaddr; /* start block address of SSA area */
1044 unsigned int segment_count; /* total # of segments */
1045 unsigned int main_segments; /* # of segments in main area */
1046 unsigned int reserved_segments; /* # of reserved segments */
1047 unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
1048 unsigned int ovp_segments; /* # of overprovision segments */
1050 /* a threshold to reclaim prefree segments */
1051 unsigned int rec_prefree_segments;
1053 struct list_head sit_entry_set; /* sit entry set list */
1055 unsigned int ipu_policy; /* in-place-update policy */
1056 unsigned int min_ipu_util; /* in-place-update threshold */
1057 unsigned int min_fsync_blocks; /* threshold for fsync */
1058 unsigned int min_seq_blocks; /* threshold for sequential blocks */
1059 unsigned int min_hot_blocks; /* threshold for hot block allocation */
1060 unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */
1062 /* for flush command control */
1063 struct flush_cmd_control *fcc_info;
1065 /* for discard command control */
1066 struct discard_cmd_control *dcc_info;
1073 * COUNT_TYPE for monitoring
1075 * f2fs monitors the number of several block types such as on-writeback,
1076 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
1078 #define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
1097 * The below are the page types of bios used in submit_bio().
1098 * The available types are:
1099  *	DATA		User data pages. It operates in async mode.
1100  *	NODE		Node pages. It operates in async mode.
1101  *	META		FS metadata pages such as SIT, NAT, CP.
1102  *	NR_PAGE_TYPE	The number of page types.
1103  *	META_FLUSH	Make sure the previous pages are written
1104  *			by waiting for the bio's completion.
1105  *	...		Can only be used with META.
1107 #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
1110 NODE = 1, /* should not change this */
1114 IPU, /* the below types are used by tracepoints only. */
1119 HOT = 0, /* must be zero for meta bio */
1125 enum need_lock_type {
1131 enum cp_reason_type {
1147 APP_DIRECT_IO, /* app direct write IOs */
1148 APP_BUFFERED_IO, /* app buffered write IOs */
1149 APP_WRITE_IO, /* app write IOs */
1150 APP_MAPPED_IO, /* app mapped IOs */
1151 APP_BUFFERED_CDATA_IO, /* app buffered write IOs on compressed file */
1152 APP_MAPPED_CDATA_IO, /* app mapped write IOs on compressed file */
1153 FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
1154 FS_CDATA_IO, /* data IOs from kworker/fsync/reclaimer on compressed file */
1155 FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */
1156 FS_META_IO, /* meta IOs from kworker/reclaimer */
1157 	FS_GC_DATA_IO,			/* data IOs from foreground gc */
1158 	FS_GC_NODE_IO,			/* node IOs from foreground gc */
1159 FS_CP_DATA_IO, /* data IOs from checkpoint */
1160 FS_CP_NODE_IO, /* node IOs from checkpoint */
1161 FS_CP_META_IO, /* meta IOs from checkpoint */
1164 APP_DIRECT_READ_IO, /* app direct read IOs */
1165 APP_BUFFERED_READ_IO, /* app buffered read IOs */
1166 APP_READ_IO, /* app read IOs */
1167 APP_MAPPED_READ_IO, /* app mapped read IOs */
1168 APP_BUFFERED_CDATA_READ_IO, /* app buffered read IOs on compressed file */
1169 APP_MAPPED_CDATA_READ_IO, /* app mapped read IOs on compressed file */
1170 FS_DATA_READ_IO, /* data read IOs */
1171 FS_GDATA_READ_IO, /* data read IOs from background gc */
1172 FS_CDATA_READ_IO, /* compressed data read IOs */
1173 FS_NODE_READ_IO, /* node read IOs */
1174 FS_META_READ_IO, /* meta read IOs */
1177 FS_DISCARD_IO, /* discard */
1178 FS_FLUSH_IO, /* flush */
1179 FS_ZONE_RESET_IO, /* zone reset */
1183 struct f2fs_io_info {
1184 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
1185 nid_t ino; /* inode number */
1186 enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
1187 enum temp_type temp; /* contains HOT/WARM/COLD */
1188 enum req_op op; /* contains REQ_OP_ */
1189 blk_opf_t op_flags; /* req_flag_bits */
1190 block_t new_blkaddr; /* new block address to be written */
1191 block_t old_blkaddr; /* old block address before Cow */
1192 struct page *page; /* page to be written */
1193 struct page *encrypted_page; /* encrypted page */
1194 struct page *compressed_page; /* compressed page */
1195 struct list_head list; /* serialize IOs */
1196 unsigned int compr_blocks; /* # of compressed block addresses */
1197 unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */
1198 unsigned int version:8; /* version of the node */
1199 unsigned int submitted:1; /* indicate IO submission */
1200 unsigned int in_list:1; /* indicate fio is in io_list */
1201 unsigned int is_por:1; /* indicate IO is from recovery or not */
1202 unsigned int retry:1; /* need to reallocate block address */
1203 unsigned int encrypted:1; /* indicate file is encrypted */
1204 unsigned int post_read:1; /* require post read */
1205 enum iostat_type io_type; /* io type */
1206 struct writeback_control *io_wbc; /* writeback control */
1207 struct bio **bio; /* bio for ipu */
1208 sector_t *last_block; /* last block number in bio */
1213 struct list_head list;
1216 #define is_read_io(rw) ((rw) == READ)
1217 struct f2fs_bio_info {
1218 struct f2fs_sb_info *sbi; /* f2fs superblock */
1219 struct bio *bio; /* bios to merge */
1220 sector_t last_block_in_bio; /* last block number */
1221 struct f2fs_io_info fio; /* store buffered io info. */
1222 #ifdef CONFIG_BLK_DEV_ZONED
1223 struct completion zone_wait; /* condition value for the previous open zone to close */
1224 struct bio *zone_pending_bio; /* pending bio for the previous zone */
1225 void *bi_private; /* previous bi_private for pending bio */
1227 struct f2fs_rwsem io_rwsem; /* blocking op for bio */
1228 spinlock_t io_lock; /* serialize DATA/NODE IOs */
1229 struct list_head io_list; /* track fios */
1230 struct list_head bio_list; /* bio entry list head */
1231 struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */
1234 #define FDEV(i) (sbi->devs[i])
1235 #define RDEV(i) (raw_super->devs[i])
1236 struct f2fs_dev_info {
1237 struct block_device *bdev;
1238 char path[MAX_PATH_LEN];
1239 unsigned int total_segments;
1242 #ifdef CONFIG_BLK_DEV_ZONED
1243 unsigned int nr_blkz; /* Total number of zones */
1244 unsigned long *blkz_seq; /* Bitmap indicating sequential zones */
1249 DIR_INODE, /* for dirty dir inode */
1250 FILE_INODE, /* for dirty regular/symlink inode */
1251 DIRTY_META, /* for all dirtied inode metadata */
1255 /* for inner inode cache management */
1256 struct inode_management {
1257 struct radix_tree_root ino_root; /* ino entry array */
1258 spinlock_t ino_lock; /* for ino entry lock */
1259 struct list_head ino_list; /* inode list head */
1260 unsigned long ino_num; /* number of entries */
1264 struct atgc_management {
1265 bool atgc_enabled; /* ATGC is enabled or not */
1266 struct rb_root_cached root; /* root of victim rb-tree */
1267 struct list_head victim_list; /* linked with all victim entries */
1268 unsigned int victim_count; /* victim count in rb-tree */
1269 unsigned int candidate_ratio; /* candidate ratio */
1270 unsigned int max_candidate_count; /* max candidate count */
1271 unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */
1272 unsigned long long age_threshold; /* age threshold */
1275 struct f2fs_gc_control {
1276 unsigned int victim_segno; /* target victim segment number */
1277 int init_gc_type; /* FG_GC or BG_GC */
1278 bool no_bg_gc; /* check the space and stop bg_gc */
1279 bool should_migrate_blocks; /* should migrate blocks */
1280 bool err_gc_skipped; /* return EAGAIN if GC skipped */
1281 unsigned int nr_free_secs; /* # of free sections to do GC */
1285 * For s_flag in struct f2fs_sb_info
1286  * Modifications to the enum should be kept in sync with the s_flag array
1289 SBI_IS_DIRTY, /* dirty flag for checkpoint */
1290 SBI_IS_CLOSE, /* specify unmounting */
1291 SBI_NEED_FSCK, /* need fsck.f2fs to fix */
1292 SBI_POR_DOING, /* recovery is doing or not */
1293 SBI_NEED_SB_WRITE, /* need to recover superblock */
1294 SBI_NEED_CP, /* need to checkpoint */
1295 SBI_IS_SHUTDOWN, /* shutdown by ioctl */
1296 SBI_IS_RECOVERED, /* recovered orphan/data */
1297 SBI_CP_DISABLED, /* CP was disabled last mount */
1298 SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */
1299 SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
1300 SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
1301 SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
1302 SBI_IS_RESIZEFS, /* resizefs is in process */
1303 SBI_IS_FREEZING, /* freezefs is in process */
1304 	SBI_IS_WRITABLE,			/* remove ro mount option transiently */
1314 UMOUNT_DISCARD_TIMEOUT,
1318 /* Note that this enum must be kept in sync with the gc_mode_names array */
1331 BGGC_MODE_ON, /* background gc is on */
1332 BGGC_MODE_OFF, /* background gc is off */
1334 * background gc is on, migrating blocks
1335 * like foreground gc
1340 FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
1341 FS_MODE_LFS, /* use lfs allocation only */
1342 FS_MODE_FRAGMENT_SEG, /* segment fragmentation mode */
1343 FS_MODE_FRAGMENT_BLK, /* block fragmentation mode */
1347 ALLOC_MODE_DEFAULT, /* stay default */
1348 ALLOC_MODE_REUSE, /* reuse segments as much as possible */
1352 FSYNC_MODE_POSIX, /* fsync follows posix semantics */
1353 FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */
1354 FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
1359 * automatically compress compression
1363 					 * automatic compression is disabled.
1364 * user can control the file compression
1370 DISCARD_UNIT_BLOCK, /* basic discard unit is block */
1371 DISCARD_UNIT_SEGMENT, /* basic discard unit is segment */
1372 DISCARD_UNIT_SECTION, /* basic discard unit is section */
1376 MEMORY_MODE_NORMAL, /* memory mode for normal devices */
1377 	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
1380 enum errors_option {
1381 MOUNT_ERRORS_READONLY, /* remount fs ro on errors */
1382 MOUNT_ERRORS_CONTINUE, /* continue on errors */
1383 MOUNT_ERRORS_PANIC, /* panic on errors */
1386 static inline int f2fs_test_bit(unsigned int nr, char *addr);
1387 static inline void f2fs_set_bit(unsigned int nr, char *addr);
1388 static inline void f2fs_clear_bit(unsigned int nr, char *addr);
1391 * Layout of f2fs page.private:
1393 * Layout A: lowest bit should be 1
1394 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
1395 * bit 0 PAGE_PRIVATE_NOT_POINTER
1396 * bit 1 PAGE_PRIVATE_DUMMY_WRITE
1397 * bit 2 PAGE_PRIVATE_ONGOING_MIGRATION
1398 * bit 3 PAGE_PRIVATE_INLINE_INODE
1399 * bit 4 PAGE_PRIVATE_REF_RESOURCE
1400 * bit 5- f2fs private data
1402 * Layout B: lowest bit should be 0
1403 * page.private is a wrapped pointer.
1406 PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
1407 PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
1408 PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
1409 PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
1410 PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
1414 /* For compression */
1415 enum compress_algorithm_type {
1423 enum compress_flag {
1428 #define COMPRESS_WATERMARK 20
1429 #define COMPRESS_PERCENT 20
1431 #define COMPRESS_DATA_RESERVED_SIZE 4
1432 struct compress_data {
1433 __le32 clen; /* compressed data size */
1434 __le32 chksum; /* compressed data chksum */
1435 __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
1436 u8 cdata[]; /* compressed data */
1439 #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data))
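/*
 * Illustrative arithmetic: struct compress_data carries two 4-byte fields
 * plus COMPRESS_DATA_RESERVED_SIZE (4) reserved 4-byte words, so
 * COMPRESS_HEADER_SIZE works out to 24 bytes stored in front of cdata[].
 */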
1441 #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
1443 #define F2FS_ZSTD_DEFAULT_CLEVEL 1
1445 #define COMPRESS_LEVEL_OFFSET 8
1447 /* compress context */
1448 struct compress_ctx {
1449 	struct inode *inode;		/* inode the context belongs to */
1450 pgoff_t cluster_idx; /* cluster index number */
1451 unsigned int cluster_size; /* page count in cluster */
1452 unsigned int log_cluster_size; /* log of cluster size */
1453 struct page **rpages; /* pages store raw data in cluster */
1454 unsigned int nr_rpages; /* total page number in rpages */
1455 struct page **cpages; /* pages store compressed data in cluster */
1456 unsigned int nr_cpages; /* total page number in cpages */
1457 unsigned int valid_nr_cpages; /* valid page number in cpages */
1458 void *rbuf; /* virtual mapped address on rpages */
1459 struct compress_data *cbuf; /* virtual mapped address on cpages */
1460 size_t rlen; /* valid data length in rbuf */
1461 size_t clen; /* valid data length in cbuf */
1462 void *private; /* payload buffer for specified compression algorithm */
1463 void *private2; /* extra payload buffer */
1466 /* compress context for write IO path */
1467 struct compress_io_ctx {
1468 u32 magic; /* magic number to indicate page is compressed */
1469 	struct inode *inode;		/* inode the context belongs to */
1470 struct page **rpages; /* pages store raw data in cluster */
1471 unsigned int nr_rpages; /* total page number in rpages */
1472 atomic_t pending_pages; /* in-flight compressed page count */
1475 /* Context for decompressing one cluster on the read IO path */
1476 struct decompress_io_ctx {
1477 u32 magic; /* magic number to indicate page is compressed */
1478 	struct inode *inode;		/* inode the context belongs to */
1479 pgoff_t cluster_idx; /* cluster index number */
1480 unsigned int cluster_size; /* page count in cluster */
1481 unsigned int log_cluster_size; /* log of cluster size */
1482 struct page **rpages; /* pages store raw data in cluster */
1483 unsigned int nr_rpages; /* total page number in rpages */
1484 struct page **cpages; /* pages store compressed data in cluster */
1485 unsigned int nr_cpages; /* total page number in cpages */
1486 struct page **tpages; /* temp pages to pad holes in cluster */
1487 void *rbuf; /* virtual mapped address on rpages */
1488 struct compress_data *cbuf; /* virtual mapped address on cpages */
1489 size_t rlen; /* valid data length in rbuf */
1490 size_t clen; /* valid data length in cbuf */
1493 * The number of compressed pages remaining to be read in this cluster.
1494 * This is initially nr_cpages. It is decremented by 1 each time a page
1495 * has been read (or failed to be read). When it reaches 0, the cluster
1496 * is decompressed (or an error is reported).
1498 * If an error occurs before all the pages have been submitted for I/O,
1499 * then this will never reach 0. In this case the I/O submitter is
1500 * responsible for calling f2fs_decompress_end_io() instead.
1502 atomic_t remaining_pages;
1505 * Number of references to this decompress_io_ctx.
1507 * One reference is held for I/O completion. This reference is dropped
1508 * after the pagecache pages are updated and unlocked -- either after
1509 * decompression (and verity if enabled), or after an error.
1511 * In addition, each compressed page holds a reference while it is in a
1512 	 * bio. These references are necessary to prevent compressed pages from
1513 * being freed while they are still in a bio.
1517 bool failed; /* IO error occurred before decompression? */
1518 bool need_verity; /* need fs-verity verification after decompression? */
1519 void *private; /* payload buffer for specified decompression algorithm */
1520 void *private2; /* extra payload buffer */
1521 struct work_struct verity_work; /* work to verify the decompressed pages */
1522 	struct work_struct free_work;	/* work to free this structure itself later */
1525 #define NULL_CLUSTER ((unsigned int)(~0))
1526 #define MIN_COMPRESS_LOG_SIZE 2
1527 #define MAX_COMPRESS_LOG_SIZE 8
1528 #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size))
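/*
 * Illustrative note: with 4KB pages, MIN/MAX_COMPRESS_LOG_SIZE allow cluster
 * sizes from 1 << 2 = 4 pages (a 16KB compression window) up to
 * 1 << 8 = 256 pages (a 1MB window).
 */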
1530 struct f2fs_sb_info {
1531 struct super_block *sb; /* pointer to VFS super block */
1532 struct proc_dir_entry *s_proc; /* proc entry */
1533 struct f2fs_super_block *raw_super; /* raw super block pointer */
1534 struct f2fs_rwsem sb_lock; /* lock for raw super block */
1535 int valid_super_block; /* valid super block no */
1536 unsigned long s_flag; /* flags for sbi */
1537 struct mutex writepages; /* mutex for writepages() */
1539 #ifdef CONFIG_BLK_DEV_ZONED
1540 unsigned int blocks_per_blkz; /* F2FS blocks per zone */
1543 /* for node-related operations */
1544 struct f2fs_nm_info *nm_info; /* node manager */
1545 struct inode *node_inode; /* cache node blocks */
1547 /* for segment-related operations */
1548 struct f2fs_sm_info *sm_info; /* segment manager */
1550 /* for bio operations */
1551 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
1552 /* keep migration IO order for LFS mode */
1553 struct f2fs_rwsem io_order_lock;
1554 mempool_t *write_io_dummy; /* Dummy pages */
1555 pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
1556 int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
1558 /* for checkpoint */
1559 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
1560 	int cur_cp_pack;			/* current cp pack (1 or 2) */
1561 spinlock_t cp_lock; /* for flag in ckpt */
1562 struct inode *meta_inode; /* cache meta blocks */
1563 struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
1564 struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
1565 struct f2fs_rwsem node_write; /* locking node writes */
1566 struct f2fs_rwsem node_change; /* locking node change */
1567 wait_queue_head_t cp_wait;
1568 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
1569 long interval_time[MAX_TIME]; /* to store thresholds */
1570 struct ckpt_req_control cprc_info; /* for checkpoint request control */
1572 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
1574 spinlock_t fsync_node_lock; /* for node entry lock */
1575 struct list_head fsync_node_list; /* node list head */
1576 unsigned int fsync_seg_id; /* sequence id */
1577 unsigned int fsync_node_num; /* number of node entries */
1579 /* for orphan inode, use 0'th array */
1580 unsigned int max_orphans; /* max orphan inodes */
1582 /* for inode management */
1583 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
1584 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
1585 struct mutex flush_lock; /* for flush exclusion */
1587 /* for extent tree cache */
1588 struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
1589 atomic64_t allocated_data_blocks; /* for block age extent_cache */
1591 	/* The thresholds used for hot and warm data separation */
1592 unsigned int hot_data_age_threshold;
1593 unsigned int warm_data_age_threshold;
1594 unsigned int last_age_weight;
1596 /* basic filesystem units */
1597 unsigned int log_sectors_per_block; /* log2 sectors per block */
1598 unsigned int log_blocksize; /* log2 block size */
1599 unsigned int blocksize; /* block size */
1600 	unsigned int root_ino_num;		/* root inode number */
1601 	unsigned int node_ino_num;		/* node inode number */
1602 	unsigned int meta_ino_num;		/* meta inode number */
1603 unsigned int log_blocks_per_seg; /* log2 blocks per segment */
1604 unsigned int blocks_per_seg; /* blocks per segment */
1605 unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
1606 unsigned int segs_per_sec; /* segments per section */
1607 unsigned int secs_per_zone; /* sections per zone */
1608 unsigned int total_sections; /* total section count */
1609 unsigned int total_node_count; /* total node block count */
1610 unsigned int total_valid_node_count; /* valid node block count */
1611 int dir_level; /* directory level */
1612 bool readdir_ra; /* readahead inode in readdir */
1613 u64 max_io_bytes; /* max io bytes to merge IOs */
1615 block_t user_block_count; /* # of user blocks */
1616 block_t total_valid_block_count; /* # of valid blocks */
1617 	block_t discard_blks;			/* discard command candidates */
1618 block_t last_valid_block_count; /* for recovery */
1619 block_t reserved_blocks; /* configurable reserved blocks */
1620 block_t current_reserved_blocks; /* current reserved blocks */
1622 /* Additional tracking for no checkpoint mode */
1623 block_t unusable_block_count; /* # of blocks saved by last cp */
1625 unsigned int nquota_files; /* # of quota sysfile */
1626 struct f2fs_rwsem quota_sem; /* blocking cp for flags */
1628 /* # of pages, see count_type */
1629 atomic_t nr_pages[NR_COUNT_TYPE];
1630 /* # of allocated blocks */
1631 struct percpu_counter alloc_valid_block_count;
1632 /* # of node block writes as roll forward recovery */
1633 struct percpu_counter rf_node_block_count;
1635 /* writeback control */
1636 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */
1638 /* valid inode count */
1639 struct percpu_counter total_valid_inode_count;
1641 struct f2fs_mount_info mount_opt; /* mount options */
1643 /* for cleaning operations */
1644 	struct f2fs_rwsem gc_lock;		/*
1645 						 * semaphore for GC; avoids
1646 						 * races between concurrent GCs or GC and CP
1648 struct f2fs_gc_kthread *gc_thread; /* GC thread */
1649 struct atgc_management am; /* atgc management */
1650 unsigned int cur_victim_sec; /* current victim section num */
1651 unsigned int gc_mode; /* current GC state */
1652 unsigned int next_victim_seg[2]; /* next segment in victim section */
1653 spinlock_t gc_remaining_trials_lock;
1654 /* remaining trial count for GC_URGENT_* and GC_IDLE_* */
1655 unsigned int gc_remaining_trials;
1657 /* for skip statistic */
1658 unsigned long long skipped_gc_rwsem; /* FG_GC only */
1660 /* threshold for gc trials on pinned files */
1661 u64 gc_pin_file_threshold;
1662 struct f2fs_rwsem pin_sem;
1664 /* maximum # of trials to find a victim segment for SSR and GC */
1665 unsigned int max_victim_search;
1666 /* migration granularity of garbage collection, unit: segment */
1667 unsigned int migration_granularity;
1669 /*
1670 * for stat information.
1671 * one is for the LFS mode, and the other is for the SSR mode.
1672 */
1673 #ifdef CONFIG_F2FS_STAT_FS
1674 struct f2fs_stat_info *stat_info; /* FS status information */
1675 atomic_t meta_count[META_MAX]; /* # of meta blocks */
1676 unsigned int segment_count[2]; /* # of allocated segments */
1677 unsigned int block_count[2]; /* # of allocated blocks */
1678 atomic_t inplace_count; /* # of inplace update */
1679 /* # of extent cache lookups */
1680 atomic64_t total_hit_ext[NR_EXTENT_CACHES];
1681 /* # of hit rbtree extent node */
1682 atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
1683 /* # of hit cached extent node */
1684 atomic64_t read_hit_cached[NR_EXTENT_CACHES];
1685 /* # of hit largest extent node in read extent cache */
1686 atomic64_t read_hit_largest;
1687 atomic_t inline_xattr; /* # of inline_xattr inodes */
1688 atomic_t inline_inode; /* # of inline_data inodes */
1689 atomic_t inline_dir; /* # of inline_dentry inodes */
1690 atomic_t compr_inode; /* # of compressed inodes */
1691 atomic64_t compr_blocks; /* # of compressed blocks */
1692 atomic_t swapfile_inode; /* # of swapfile inodes */
1693 atomic_t atomic_files; /* # of opened atomic file */
1694 atomic_t max_aw_cnt; /* max # of atomic writes */
1695 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
1696 unsigned int other_skip_bggc; /* skip background gc for other reasons */
1697 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
1699 spinlock_t stat_lock; /* lock for stat operations */
1701 /* to attach REQ_META|REQ_FUA flags */
1702 unsigned int data_io_flag;
1703 unsigned int node_io_flag;
1705 /* For sysfs support */
1706 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
1707 struct completion s_kobj_unregister;
1709 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
1710 struct completion s_stat_kobj_unregister;
1712 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */
1713 struct completion s_feature_list_kobj_unregister;
1715 /* For shrinker support */
1716 struct list_head s_list;
1717 struct mutex umount_mutex;
1718 unsigned int shrinker_run_no;
1720 /* For multi devices */
1721 int s_ndevs; /* number of devices */
1722 struct f2fs_dev_info *devs; /* for device list */
1723 unsigned int dirty_device; /* for checkpoint data flush */
1724 spinlock_t dev_lock; /* protect dirty_device */
1725 bool aligned_blksize; /* all devices have the same logical blksize */
1727 /* For write statistics */
1728 u64 sectors_written_start;
1731 /* Reference to checksum algorithm driver via cryptoapi */
1732 struct crypto_shash *s_chksum_driver;
1734 /* Precomputed FS UUID checksum for seeding other checksums */
1735 __u32 s_chksum_seed;
1737 struct workqueue_struct *post_read_wq; /* post read workqueue */
1739 /*
1740 * If we are in irq context, update the error information in the
1741 * on-disk superblock via this work item.
1742 */
1743 struct work_struct s_error_work;
1744 unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
1745 unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */
1746 spinlock_t error_lock; /* protect errors/stop_reason array */
1747 bool error_dirty; /* sb error info is dirty */
1749 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
1750 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
1752 /* For reclaimed segs statistics per GC mode */
1753 unsigned int gc_segment_mode; /* GC state for reclaimed segments */
1754 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */
1756 unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */
1758 int max_fragment_chunk; /* max chunk size for block fragmentation mode */
1759 int max_fragment_hole; /* max hole size for block fragmentation mode */
1761 /* For atomic write statistics */
1762 atomic64_t current_atomic_write;
1763 s64 peak_atomic_write;
1764 u64 committed_atomic_block;
1765 u64 revoked_atomic_block;
1767 #ifdef CONFIG_F2FS_FS_COMPRESSION
1768 struct kmem_cache *page_array_slab; /* page array entry */
1769 unsigned int page_array_slab_size; /* default page array slab size */
1771 /* For runtime compression statistics */
1772 u64 compr_written_block;
1773 u64 compr_saved_block;
1774 u32 compr_new_inode;
1776 /* For compressed block cache */
1777 struct inode *compress_inode; /* cache compressed blocks */
1778 unsigned int compress_percent; /* cache page percentage */
1779 unsigned int compress_watermark; /* cache page watermark */
1780 atomic_t compress_page_hit; /* cache hit count */
1783 #ifdef CONFIG_F2FS_IOSTAT
1784 /* For app/fs IO statistics */
1785 spinlock_t iostat_lock;
1786 unsigned long long iostat_count[NR_IO_TYPE];
1787 unsigned long long iostat_bytes[NR_IO_TYPE];
1788 unsigned long long prev_iostat_bytes[NR_IO_TYPE];
1790 unsigned long iostat_next_period;
1791 unsigned int iostat_period_ms;
1793 /* For io latency related statistics info in one iostat period */
1794 spinlock_t iostat_lat_lock;
1795 struct iostat_lat_info *iostat_io_lat;
1799 #ifdef CONFIG_F2FS_FAULT_INJECTION
1800 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \
1801 __builtin_return_address(0))
1802 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
1803 const char *func, const char *parent_func)
1805 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1807 if (!ffi->inject_rate)
1810 if (!IS_FAULT_SET(ffi, type))
1813 atomic_inc(&ffi->inject_ops);
1814 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1815 atomic_set(&ffi->inject_ops, 0);
1816 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",
1817 KERN_INFO, sbi->sb->s_id, f2fs_fault_name[type],
1824 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1830 /*
1831 * Test if the mounted volume is a multi-device volume.
1832 * - For a single regular disk volume, sbi->s_ndevs is 0.
1833 * - For a single zoned disk volume, sbi->s_ndevs is 1.
1834 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1835 */
1836 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1838 return sbi->s_ndevs > 1;
1841 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1843 unsigned long now = jiffies;
1845 sbi->last_time[type] = now;
1847 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1848 if (type == REQ_TIME) {
1849 sbi->last_time[DISCARD_TIME] = now;
1850 sbi->last_time[GC_TIME] = now;
1854 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1856 unsigned long interval = sbi->interval_time[type] * HZ;
1858 return time_after(jiffies, sbi->last_time[type] + interval);
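/*
 * f2fs_time_over() pairs with f2fs_update_time() above: it reports whether
 * interval_time[type] seconds have elapsed since the timestamp last
 * recorded for @type.
 */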
1861 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1864 unsigned long interval = sbi->interval_time[type] * HZ;
1865 unsigned int wait_ms = 0;
1868 delta = (sbi->last_time[type] + interval) - jiffies;
1870 wait_ms = jiffies_to_msecs(delta);
1878 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1879 const void *address, unsigned int length)
1881 struct {
1882 struct shash_desc shash;
1883 char ctx[4];
1884 } desc;
1885 int err;
1887 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1889 desc.shash.tfm = sbi->s_chksum_driver;
1890 *(u32 *)desc.ctx = crc;
1892 err = crypto_shash_update(&desc.shash, address, length);
1895 return *(u32 *)desc.ctx;
1898 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1899 unsigned int length)
1901 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1904 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1905 void *buf, size_t buf_size)
1907 return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1910 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1911 const void *address, unsigned int length)
1913 return __f2fs_crc32(sbi, crc, address, length);
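/*
 * Illustrative usage of the checksum helpers (a sketch; buf, len and
 * raw_crc are placeholder names): f2fs_crc32() seeds the CRC with
 * F2FS_SUPER_MAGIC, while f2fs_chksum() chains from a caller-supplied
 * seed value.
 *
 *	u32 crc = f2fs_crc32(sbi, buf, len);
 *	bool ok = f2fs_crc_valid(sbi, raw_crc, buf, len);
 */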
1916 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1918 return container_of(inode, struct f2fs_inode_info, vfs_inode);
1921 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1923 return sb->s_fs_info;
1926 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1928 return F2FS_SB(inode->i_sb);
1931 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1933 return F2FS_I_SB(mapping->host);
1936 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1938 return F2FS_M_SB(page_file_mapping(page));
1941 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1943 return (struct f2fs_super_block *)(sbi->raw_super);
1946 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1948 return (struct f2fs_checkpoint *)(sbi->ckpt);
1951 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1953 return (struct f2fs_node *)page_address(page);
1956 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1958 return &((struct f2fs_node *)page_address(page))->i;
1961 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1963 return (struct f2fs_nm_info *)(sbi->nm_info);
1966 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1968 return (struct f2fs_sm_info *)(sbi->sm_info);
1971 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1973 return (struct sit_info *)(SM_I(sbi)->sit_info);
1976 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1978 return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1981 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1983 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1986 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1988 return sbi->meta_inode->i_mapping;
1991 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
1993 return sbi->node_inode->i_mapping;
1996 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
1998 return test_bit(type, &sbi->s_flag);
2001 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2003 set_bit(type, &sbi->s_flag);
2006 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2008 clear_bit(type, &sbi->s_flag);
2011 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2013 return le64_to_cpu(cp->checkpoint_ver);
2016 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2018 if (type < F2FS_MAX_QUOTAS)
2019 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2023 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2025 size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2026 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2029 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2031 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2033 return ckpt_flags & f;
2036 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2038 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2041 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2043 unsigned int ckpt_flags;
2045 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2046 ckpt_flags |= f;
2047 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2050 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2052 unsigned long flags;
2054 spin_lock_irqsave(&sbi->cp_lock, flags);
2055 __set_ckpt_flags(F2FS_CKPT(sbi), f);
2056 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2059 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2061 unsigned int ckpt_flags;
2063 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2064 ckpt_flags &= (~f);
2065 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2068 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2070 unsigned long flags;
2072 spin_lock_irqsave(&sbi->cp_lock, flags);
2073 __clear_ckpt_flags(F2FS_CKPT(sbi), f);
2074 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2077 #define init_f2fs_rwsem(sem) \
2079 static struct lock_class_key __key; \
2081 __init_f2fs_rwsem((sem), #sem, &__key); \
2084 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2085 const char *sem_name, struct lock_class_key *key)
2087 __init_rwsem(&sem->internal_rwsem, sem_name, key);
2088 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2089 init_waitqueue_head(&sem->read_waiters);
2093 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2095 return rwsem_is_locked(&sem->internal_rwsem);
2098 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2100 return rwsem_is_contended(&sem->internal_rwsem);
2103 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2105 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2106 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2108 down_read(&sem->internal_rwsem);
2112 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2114 return down_read_trylock(&sem->internal_rwsem);
2117 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2118 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2120 down_read_nested(&sem->internal_rwsem, subclass);
2123 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2126 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2128 up_read(&sem->internal_rwsem);
2131 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2133 down_write(&sem->internal_rwsem);
2136 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2138 return down_write_trylock(&sem->internal_rwsem);
2141 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2143 up_write(&sem->internal_rwsem);
2144 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2145 wake_up_all(&sem->read_waiters);
2149 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2151 f2fs_down_read(&sbi->cp_rwsem);
2154 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2156 if (time_to_inject(sbi, FAULT_LOCK_OP))
2158 return f2fs_down_read_trylock(&sbi->cp_rwsem);
2161 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2163 f2fs_up_read(&sbi->cp_rwsem);
2166 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2168 f2fs_down_write(&sbi->cp_rwsem);
2171 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2173 f2fs_up_write(&sbi->cp_rwsem);
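/*
 * Typical pairing (an illustrative sketch, not copied from a specific
 * caller): regular metadata updates take cp_rwsem for read via
 * f2fs_lock_op(), so that checkpoint, which takes it for write via
 * f2fs_lock_all(), is excluded while the update is in flight:
 *
 *	f2fs_lock_op(sbi);
 *	...allocate/modify node and data blocks...
 *	f2fs_unlock_op(sbi);
 */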
2176 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2178 int reason = CP_SYNC;
2180 if (test_opt(sbi, FASTBOOT))
2181 reason = CP_FASTBOOT;
2182 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2183 reason = CP_UMOUNT;
2187 static inline bool __remain_node_summaries(int reason)
2189 return (reason & (CP_UMOUNT | CP_FASTBOOT));
2192 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2194 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2195 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2198 /*
2199 * Check whether the inode has blocks or not
2200 */
2201 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2203 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2205 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
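/*
 * i_blocks is maintained in 512-byte sectors, so the shift by
 * F2FS_LOG_SECTORS_PER_BLOCK converts it to filesystem blocks; the block
 * backing an xattr node, if present, is not counted as data here.
 */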
2208 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2210 return ofs == XATTR_NODE_OFFSET;
2213 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2214 struct inode *inode, bool cap)
2218 if (!test_opt(sbi, RESERVE_ROOT))
2220 if (IS_NOQUOTA(inode))
2222 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2224 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2225 in_group_p(F2FS_OPTION(sbi).s_resgid))
2227 if (cap && capable(CAP_SYS_RESOURCE))
2232 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2233 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2234 struct inode *inode, blkcnt_t *count)
2236 blkcnt_t diff = 0, release = 0;
2237 block_t avail_user_block_count;
2240 ret = dquot_reserve_block(inode, *count);
2244 if (time_to_inject(sbi, FAULT_BLOCK)) {
2249 /*
2250 * let's increase this prior to the actual block count change in order
2251 * for f2fs_sync_file to avoid data races when deciding checkpoint.
2252 */
2253 percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2255 spin_lock(&sbi->stat_lock);
2256 sbi->total_valid_block_count += (block_t)(*count);
2257 avail_user_block_count = sbi->user_block_count -
2258 sbi->current_reserved_blocks;
2260 if (!__allow_reserved_blocks(sbi, inode, true))
2261 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2263 if (F2FS_IO_ALIGNED(sbi))
2264 avail_user_block_count -= sbi->blocks_per_seg *
2265 SM_I(sbi)->additional_reserved_segments;
2267 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2268 if (avail_user_block_count > sbi->unusable_block_count)
2269 avail_user_block_count -= sbi->unusable_block_count;
2271 avail_user_block_count = 0;
2273 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2274 diff = sbi->total_valid_block_count - avail_user_block_count;
2279 sbi->total_valid_block_count -= diff;
2281 spin_unlock(&sbi->stat_lock);
2285 spin_unlock(&sbi->stat_lock);
2287 if (unlikely(release)) {
2288 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2289 dquot_release_reservation_block(inode, release);
2291 f2fs_i_blocks_write(inode, *count, true, true);
2295 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2297 dquot_release_reservation_block(inode, release);
2302 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2304 #define f2fs_err(sbi, fmt, ...) \
2305 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2306 #define f2fs_warn(sbi, fmt, ...) \
2307 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2308 #define f2fs_notice(sbi, fmt, ...) \
2309 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2310 #define f2fs_info(sbi, fmt, ...) \
2311 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2312 #define f2fs_debug(sbi, fmt, ...) \
2313 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
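/*
 * Example (illustrative): messages are routed through f2fs_printk(),
 * which prefixes the device name and appends the newline, so callers
 * pass a plain format string:
 *
 *	f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
 */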
2315 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
2316 static inline bool page_private_##name(struct page *page) \
2318 return PagePrivate(page) && \
2319 test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
2320 test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2323 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
2324 static inline void set_page_private_##name(struct page *page) \
2326 if (!PagePrivate(page)) \
2327 attach_page_private(page, (void *)0); \
2328 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
2329 set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2332 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
2333 static inline void clear_page_private_##name(struct page *page) \
2335 clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2336 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
2337 detach_page_private(page); \
2340 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
2341 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
2342 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
2343 PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
2345 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
2346 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
2347 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
2348 PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
2350 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
2351 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
2352 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
2353 PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
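/*
 * The instantiations above generate helpers named after the first macro
 * argument, e.g. page_private_gcing(), set_page_private_inline() and
 * clear_page_private_reference(); each one tests, sets or clears the
 * corresponding PAGE_PRIVATE_* bit in page->private.
 */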
2355 static inline unsigned long get_page_private_data(struct page *page)
2357 unsigned long data = page_private(page);
2359 if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
2361 return data >> PAGE_PRIVATE_MAX;
2364 static inline void set_page_private_data(struct page *page, unsigned long data)
2366 if (!PagePrivate(page))
2367 attach_page_private(page, (void *)0);
2368 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
2369 page_private(page) |= data << PAGE_PRIVATE_MAX;
2372 static inline void clear_page_private_data(struct page *page)
2374 page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
2375 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
2376 detach_page_private(page);
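/*
 * Layout implied by the helpers above: bits [0, PAGE_PRIVATE_MAX) of
 * page->private hold the flag bits (with PAGE_PRIVATE_NOT_POINTER marking
 * that the field is not a pointer), while set_page_private_data() stores
 * its value in the bits above PAGE_PRIVATE_MAX.
 */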
2379 static inline void clear_page_private_all(struct page *page)
2381 clear_page_private_data(page);
2382 clear_page_private_reference(page);
2383 clear_page_private_gcing(page);
2384 clear_page_private_inline(page);
2386 f2fs_bug_on(F2FS_P_SB(page), page_private(page));
2389 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2390 struct inode *inode,
2393 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2395 spin_lock(&sbi->stat_lock);
2396 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2397 sbi->total_valid_block_count -= (block_t)count;
2398 if (sbi->reserved_blocks &&
2399 sbi->current_reserved_blocks < sbi->reserved_blocks)
2400 sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2401 sbi->current_reserved_blocks + count);
2402 spin_unlock(&sbi->stat_lock);
2403 if (unlikely(inode->i_blocks < sectors)) {
2404 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2406 (unsigned long long)inode->i_blocks,
2407 (unsigned long long)sectors);
2408 set_sbi_flag(sbi, SBI_NEED_FSCK);
2411 f2fs_i_blocks_write(inode, count, false, true);
2414 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2416 atomic_inc(&sbi->nr_pages[count_type]);
2418 if (count_type == F2FS_DIRTY_DENTS ||
2419 count_type == F2FS_DIRTY_NODES ||
2420 count_type == F2FS_DIRTY_META ||
2421 count_type == F2FS_DIRTY_QDATA ||
2422 count_type == F2FS_DIRTY_IMETA)
2423 set_sbi_flag(sbi, SBI_IS_DIRTY);
2426 static inline void inode_inc_dirty_pages(struct inode *inode)
2428 atomic_inc(&F2FS_I(inode)->dirty_pages);
2429 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2430 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2431 if (IS_NOQUOTA(inode))
2432 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2435 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2437 atomic_dec(&sbi->nr_pages[count_type]);
2440 static inline void inode_dec_dirty_pages(struct inode *inode)
2442 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2443 !S_ISLNK(inode->i_mode))
2446 atomic_dec(&F2FS_I(inode)->dirty_pages);
2447 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2448 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2449 if (IS_NOQUOTA(inode))
2450 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2453 static inline void inc_atomic_write_cnt(struct inode *inode)
2455 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2456 struct f2fs_inode_info *fi = F2FS_I(inode);
2459 fi->atomic_write_cnt++;
2460 atomic64_inc(&sbi->current_atomic_write);
2461 current_write = atomic64_read(&sbi->current_atomic_write);
2462 if (current_write > sbi->peak_atomic_write)
2463 sbi->peak_atomic_write = current_write;
2466 static inline void release_atomic_write_cnt(struct inode *inode)
2468 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2469 struct f2fs_inode_info *fi = F2FS_I(inode);
2471 atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
2472 fi->atomic_write_cnt = 0;
2475 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2477 return atomic_read(&sbi->nr_pages[count_type]);
2480 static inline int get_dirty_pages(struct inode *inode)
2482 return atomic_read(&F2FS_I(inode)->dirty_pages);
2485 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2487 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2488 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2489 sbi->log_blocks_per_seg;
2491 return segs / sbi->segs_per_sec;
2494 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2496 return sbi->total_valid_block_count;
2499 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2501 return sbi->discard_blks;
2504 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2506 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2508 /* return NAT or SIT bitmap */
2509 if (flag == NAT_BITMAP)
2510 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2511 else if (flag == SIT_BITMAP)
2512 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2517 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2519 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2522 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2524 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2525 void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2528 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2529 offset = (flag == SIT_BITMAP) ?
2530 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2531 /*
2532 * if large_nat_bitmap feature is enabled, leave checksum
2533 * protection for all nat/sit bitmaps.
2534 */
2535 return tmp_ptr + offset + sizeof(__le32);
2538 if (__cp_payload(sbi) > 0) {
2539 if (flag == NAT_BITMAP)
2540 return tmp_ptr;
2541 else
2542 return (unsigned char *)ckpt + F2FS_BLKSIZE;
2544 offset = (flag == NAT_BITMAP) ?
2545 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2546 return tmp_ptr + offset;
2550 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2552 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2554 if (sbi->cur_cp_pack == 2)
2555 start_addr += sbi->blocks_per_seg;
2559 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2561 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2563 if (sbi->cur_cp_pack == 1)
2564 start_addr += sbi->blocks_per_seg;
2568 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2570 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
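/*
 * F2FS keeps two checkpoint packs in consecutive segments starting at
 * cp_blkaddr; cur_cp_pack selects the one written last.
 * __start_cp_addr() returns the current pack, __start_cp_next_addr() the
 * other one, and __set_cp_next_pack() flips between them when a new
 * checkpoint is committed.
 */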
2573 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2575 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2578 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
2579 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2580 struct inode *inode, bool is_inode)
2582 block_t valid_block_count;
2583 unsigned int valid_node_count, user_block_count;
2588 err = dquot_alloc_inode(inode);
2593 err = dquot_reserve_block(inode, 1);
2598 if (time_to_inject(sbi, FAULT_BLOCK))
2601 spin_lock(&sbi->stat_lock);
2603 valid_block_count = sbi->total_valid_block_count +
2604 sbi->current_reserved_blocks + 1;
2606 if (!__allow_reserved_blocks(sbi, inode, false))
2607 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2609 if (F2FS_IO_ALIGNED(sbi))
2610 valid_block_count += sbi->blocks_per_seg *
2611 SM_I(sbi)->additional_reserved_segments;
2613 user_block_count = sbi->user_block_count;
2614 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2615 user_block_count -= sbi->unusable_block_count;
2617 if (unlikely(valid_block_count > user_block_count)) {
2618 spin_unlock(&sbi->stat_lock);
2622 valid_node_count = sbi->total_valid_node_count + 1;
2623 if (unlikely(valid_node_count > sbi->total_node_count)) {
2624 spin_unlock(&sbi->stat_lock);
2628 sbi->total_valid_node_count++;
2629 sbi->total_valid_block_count++;
2630 spin_unlock(&sbi->stat_lock);
2634 f2fs_mark_inode_dirty_sync(inode, true);
2636 f2fs_i_blocks_write(inode, 1, true, true);
2639 percpu_counter_inc(&sbi->alloc_valid_block_count);
2645 dquot_free_inode(inode);
2647 dquot_release_reservation_block(inode, 1);
2652 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2653 struct inode *inode, bool is_inode)
2655 spin_lock(&sbi->stat_lock);
2657 if (unlikely(!sbi->total_valid_block_count ||
2658 !sbi->total_valid_node_count)) {
2659 f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
2660 sbi->total_valid_block_count,
2661 sbi->total_valid_node_count);
2662 set_sbi_flag(sbi, SBI_NEED_FSCK);
2664 sbi->total_valid_block_count--;
2665 sbi->total_valid_node_count--;
2668 if (sbi->reserved_blocks &&
2669 sbi->current_reserved_blocks < sbi->reserved_blocks)
2670 sbi->current_reserved_blocks++;
2672 spin_unlock(&sbi->stat_lock);
2675 dquot_free_inode(inode);
2677 if (unlikely(inode->i_blocks == 0)) {
2678 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2680 (unsigned long long)inode->i_blocks);
2681 set_sbi_flag(sbi, SBI_NEED_FSCK);
2684 f2fs_i_blocks_write(inode, 1, false, true);
2688 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2690 return sbi->total_valid_node_count;
2693 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2695 percpu_counter_inc(&sbi->total_valid_inode_count);
2698 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2700 percpu_counter_dec(&sbi->total_valid_inode_count);
2703 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2705 return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2708 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2709 pgoff_t index, bool for_write)
2714 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2716 page = find_get_page_flags(mapping, index,
2717 FGP_LOCK | FGP_ACCESSED);
2719 page = find_lock_page(mapping, index);
2723 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
2728 return grab_cache_page(mapping, index);
2730 flags = memalloc_nofs_save();
2731 page = grab_cache_page_write_begin(mapping, index);
2732 memalloc_nofs_restore(flags);
2737 static inline struct page *f2fs_pagecache_get_page(
2738 struct address_space *mapping, pgoff_t index,
2739 int fgp_flags, gfp_t gfp_mask)
2741 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
2744 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2747 static inline void f2fs_put_page(struct page *page, int unlock)
2753 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2759 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2762 f2fs_put_page(dn->node_page, 1);
2763 if (dn->inode_page && dn->node_page != dn->inode_page)
2764 f2fs_put_page(dn->inode_page, 0);
2765 dn->node_page = NULL;
2766 dn->inode_page = NULL;
2769 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2772 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2775 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2780 entry = kmem_cache_alloc(cachep, flags);
2782 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2786 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2787 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2790 return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2792 if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
2795 return kmem_cache_alloc(cachep, flags);
2798 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2800 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2801 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2802 get_pages(sbi, F2FS_WB_CP_DATA) ||
2803 get_pages(sbi, F2FS_DIO_READ) ||
2804 get_pages(sbi, F2FS_DIO_WRITE))
2807 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2808 atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2811 if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2812 atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2817 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2819 if (sbi->gc_mode == GC_URGENT_HIGH)
2822 if (is_inflight_io(sbi, type))
2825 if (sbi->gc_mode == GC_URGENT_MID)
2828 if (sbi->gc_mode == GC_URGENT_LOW &&
2829 (type == DISCARD_TIME || type == GC_TIME))
2832 return f2fs_time_over(sbi, type);
2835 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2836 unsigned long index, void *item)
2838 while (radix_tree_insert(root, index, item))
2842 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
2844 static inline bool IS_INODE(struct page *page)
2846 struct f2fs_node *p = F2FS_NODE(page);
2848 return RAW_IS_INODE(p);
2851 static inline int offset_in_addr(struct f2fs_inode *i)
2853 return (i->i_inline & F2FS_EXTRA_ATTR) ?
2854 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2857 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2859 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2862 static inline int f2fs_has_extra_attr(struct inode *inode);
2863 static inline block_t data_blkaddr(struct inode *inode,
2864 struct page *node_page, unsigned int offset)
2866 struct f2fs_node *raw_node;
2869 bool is_inode = IS_INODE(node_page);
2871 raw_node = F2FS_NODE(node_page);
2875 /* from GC path only */
2876 base = offset_in_addr(&raw_node->i);
2877 else if (f2fs_has_extra_attr(inode))
2878 base = get_extra_isize(inode);
2881 addr_array = blkaddr_in_node(raw_node);
2882 return le32_to_cpu(addr_array[base + offset]);
2885 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2887 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2890 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2895 mask = BIT(7 - (nr & 0x07));
2896 return mask & *addr;
2899 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2904 mask = BIT(7 - (nr & 0x07));
2908 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2913 mask = BIT(7 - (nr & 0x07));
2917 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2923 mask = BIT(7 - (nr & 0x07));
2929 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2935 mask = BIT(7 - (nr & 0x07));
2941 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2946 mask = BIT(7 - (nr & 0x07));
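/*
 * Note on the raw bitmap helpers above: they use big-endian bit order
 * within each byte, i.e. mask = BIT(7 - (nr & 0x07)), so bit 0 maps to
 * the 0x80 bit of the first byte. For example, f2fs_set_bit(0, addr)
 * sets addr[0] |= 0x80.
 */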
2950 /*
2951 * On-disk inode flags (f2fs_inode::i_flags)
2952 */
2953 #define F2FS_COMPR_FL 0x00000004 /* Compress file */
2954 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
2955 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
2956 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
2957 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
2958 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
2959 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */
2960 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
2961 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2962 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
2963 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
2965 #define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
2967 /* Flags that should be inherited by new inodes from their parent. */
2968 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2969 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2970 F2FS_CASEFOLD_FL)
2972 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2973 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2974 F2FS_CASEFOLD_FL))
2976 /* Flags that are appropriate for non-directories/regular files. */
2977 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2979 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2981 if (S_ISDIR(mode))
2982 return flags;
2983 else if (S_ISREG(mode))
2984 return flags & F2FS_REG_FLMASK;
2986 return flags & F2FS_OTHER_FLMASK;
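/*
 * Example (illustrative): for a regular file, directory-only flags such
 * as F2FS_DIRSYNC_FL and F2FS_PROJINHERIT_FL are masked off; for special
 * files only F2FS_NODUMP_FL and F2FS_NOATIME_FL survive.
 */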
2989 static inline void __mark_inode_dirty_flag(struct inode *inode,
2993 case FI_INLINE_XATTR:
2994 case FI_INLINE_DATA:
2995 case FI_INLINE_DENTRY:
3001 case FI_INLINE_DOTS:
3003 case FI_COMPRESS_RELEASED:
3004 f2fs_mark_inode_dirty_sync(inode, true);
3008 static inline void set_inode_flag(struct inode *inode, int flag)
3010 set_bit(flag, F2FS_I(inode)->flags);
3011 __mark_inode_dirty_flag(inode, flag, true);
3014 static inline int is_inode_flag_set(struct inode *inode, int flag)
3016 return test_bit(flag, F2FS_I(inode)->flags);
3019 static inline void clear_inode_flag(struct inode *inode, int flag)
3021 clear_bit(flag, F2FS_I(inode)->flags);
3022 __mark_inode_dirty_flag(inode, flag, false);
3025 static inline bool f2fs_verity_in_progress(struct inode *inode)
3027 return IS_ENABLED(CONFIG_FS_VERITY) &&
3028 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
3031 static inline void set_acl_inode(struct inode *inode, umode_t mode)
3033 F2FS_I(inode)->i_acl_mode = mode;
3034 set_inode_flag(inode, FI_ACL_MODE);
3035 f2fs_mark_inode_dirty_sync(inode, false);
3038 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
3044 f2fs_mark_inode_dirty_sync(inode, true);
3047 static inline void f2fs_i_blocks_write(struct inode *inode,
3048 block_t diff, bool add, bool claim)
3050 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3051 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3053 /* add = 1, claim = 1 must be paired with a prior dquot_reserve_block() */
3056 dquot_claim_block(inode, diff);
3058 dquot_alloc_block_nofail(inode, diff);
3060 dquot_free_block(inode, diff);
3063 f2fs_mark_inode_dirty_sync(inode, true);
3064 if (clean || recover)
3065 set_inode_flag(inode, FI_AUTO_RECOVER);
3068 static inline bool f2fs_is_atomic_file(struct inode *inode);
3070 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
3072 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3073 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3075 if (i_size_read(inode) == i_size)
3078 i_size_write(inode, i_size);
3080 if (f2fs_is_atomic_file(inode))
3083 f2fs_mark_inode_dirty_sync(inode, true);
3084 if (clean || recover)
3085 set_inode_flag(inode, FI_AUTO_RECOVER);
3088 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
3090 F2FS_I(inode)->i_current_depth = depth;
3091 f2fs_mark_inode_dirty_sync(inode, true);
3094 static inline void f2fs_i_gc_failures_write(struct inode *inode,
3097 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
3098 f2fs_mark_inode_dirty_sync(inode, true);
3101 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
3103 F2FS_I(inode)->i_xattr_nid = xnid;
3104 f2fs_mark_inode_dirty_sync(inode, true);
3107 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
3109 F2FS_I(inode)->i_pino = pino;
3110 f2fs_mark_inode_dirty_sync(inode, true);
3113 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
3115 struct f2fs_inode_info *fi = F2FS_I(inode);
3117 if (ri->i_inline & F2FS_INLINE_XATTR)
3118 set_bit(FI_INLINE_XATTR, fi->flags);
3119 if (ri->i_inline & F2FS_INLINE_DATA)
3120 set_bit(FI_INLINE_DATA, fi->flags);
3121 if (ri->i_inline & F2FS_INLINE_DENTRY)
3122 set_bit(FI_INLINE_DENTRY, fi->flags);
3123 if (ri->i_inline & F2FS_DATA_EXIST)
3124 set_bit(FI_DATA_EXIST, fi->flags);
3125 if (ri->i_inline & F2FS_INLINE_DOTS)
3126 set_bit(FI_INLINE_DOTS, fi->flags);
3127 if (ri->i_inline & F2FS_EXTRA_ATTR)
3128 set_bit(FI_EXTRA_ATTR, fi->flags);
3129 if (ri->i_inline & F2FS_PIN_FILE)
3130 set_bit(FI_PIN_FILE, fi->flags);
3131 if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3132 set_bit(FI_COMPRESS_RELEASED, fi->flags);
3135 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
3139 if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3140 ri->i_inline |= F2FS_INLINE_XATTR;
3141 if (is_inode_flag_set(inode, FI_INLINE_DATA))
3142 ri->i_inline |= F2FS_INLINE_DATA;
3143 if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3144 ri->i_inline |= F2FS_INLINE_DENTRY;
3145 if (is_inode_flag_set(inode, FI_DATA_EXIST))
3146 ri->i_inline |= F2FS_DATA_EXIST;
3147 if (is_inode_flag_set(inode, FI_INLINE_DOTS))
3148 ri->i_inline |= F2FS_INLINE_DOTS;
3149 if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3150 ri->i_inline |= F2FS_EXTRA_ATTR;
3151 if (is_inode_flag_set(inode, FI_PIN_FILE))
3152 ri->i_inline |= F2FS_PIN_FILE;
3153 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3154 ri->i_inline |= F2FS_COMPRESS_RELEASED;
3157 static inline int f2fs_has_extra_attr(struct inode *inode)
3159 return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3162 static inline int f2fs_has_inline_xattr(struct inode *inode)
3164 return is_inode_flag_set(inode, FI_INLINE_XATTR);
3167 static inline int f2fs_compressed_file(struct inode *inode)
3169 return S_ISREG(inode->i_mode) &&
3170 is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3173 static inline bool f2fs_need_compress_data(struct inode *inode)
3175 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3177 if (!f2fs_compressed_file(inode))
3180 if (compress_mode == COMPR_MODE_FS)
3182 else if (compress_mode == COMPR_MODE_USER &&
3183 is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3189 static inline unsigned int addrs_per_inode(struct inode *inode)
3191 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
3192 get_inline_xattr_addrs(inode);
3194 if (!f2fs_compressed_file(inode))
3196 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3199 static inline unsigned int addrs_per_block(struct inode *inode)
3201 if (!f2fs_compressed_file(inode))
3202 return DEF_ADDRS_PER_BLOCK;
3203 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
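/*
 * For compressed inodes the counts above are rounded down to a multiple
 * of i_cluster_size, presumably so that a compression cluster never
 * straddles a node block boundary.
 */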
3206 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
3208 struct f2fs_inode *ri = F2FS_INODE(page);
3210 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3211 get_inline_xattr_addrs(inode)]);
3214 static inline int inline_xattr_size(struct inode *inode)
3216 if (f2fs_has_inline_xattr(inode))
3217 return get_inline_xattr_addrs(inode) * sizeof(__le32);
3221 /*
3222 * Notice: checking the inline_data flag without the inode page lock is
3223 * unsafe. It could change at any time via f2fs_convert_inline_page().
3224 */
3225 static inline int f2fs_has_inline_data(struct inode *inode)
3227 return is_inode_flag_set(inode, FI_INLINE_DATA);
3230 static inline int f2fs_exist_data(struct inode *inode)
3232 return is_inode_flag_set(inode, FI_DATA_EXIST);
3235 static inline int f2fs_has_inline_dots(struct inode *inode)
3237 return is_inode_flag_set(inode, FI_INLINE_DOTS);
3240 static inline int f2fs_is_mmap_file(struct inode *inode)
3242 return is_inode_flag_set(inode, FI_MMAP_FILE);
3245 static inline bool f2fs_is_pinned_file(struct inode *inode)
3247 return is_inode_flag_set(inode, FI_PIN_FILE);
3250 static inline bool f2fs_is_atomic_file(struct inode *inode)
3252 return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3255 static inline bool f2fs_is_cow_file(struct inode *inode)
3257 return is_inode_flag_set(inode, FI_COW_FILE);
3260 static inline bool f2fs_is_first_block_written(struct inode *inode)
3262 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
3265 static inline bool f2fs_is_drop_cache(struct inode *inode)
3267 return is_inode_flag_set(inode, FI_DROP_CACHE);
3270 static inline void *inline_data_addr(struct inode *inode, struct page *page)
3272 struct f2fs_inode *ri = F2FS_INODE(page);
3273 int extra_size = get_extra_isize(inode);
3275 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
3278 static inline int f2fs_has_inline_dentry(struct inode *inode)
3280 return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3283 static inline int is_file(struct inode *inode, int type)
3285 return F2FS_I(inode)->i_advise & type;
3288 static inline void set_file(struct inode *inode, int type)
3290 if (is_file(inode, type))
3292 F2FS_I(inode)->i_advise |= type;
3293 f2fs_mark_inode_dirty_sync(inode, true);
3296 static inline void clear_file(struct inode *inode, int type)
3298 if (!is_file(inode, type))
3300 F2FS_I(inode)->i_advise &= ~type;
3301 f2fs_mark_inode_dirty_sync(inode, true);
3304 static inline bool f2fs_is_time_consistent(struct inode *inode)
3306 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
3308 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
3310 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
3315 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3320 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3322 spin_lock(&sbi->inode_lock[DIRTY_META]);
3323 ret = list_empty(&F2FS_I(inode)->gdirty_list);
3324 spin_unlock(&sbi->inode_lock[DIRTY_META]);
3327 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3328 file_keep_isize(inode) ||
3329 i_size_read(inode) & ~PAGE_MASK)
3332 if (!f2fs_is_time_consistent(inode))
3335 spin_lock(&F2FS_I(inode)->i_size_lock);
3336 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3337 spin_unlock(&F2FS_I(inode)->i_size_lock);
3342 static inline bool f2fs_readonly(struct super_block *sb)
3344 return sb_rdonly(sb);
3347 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3349 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3352 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3354 if (len == 1 && name[0] == '.')
3357 if (len == 2 && name[0] == '.' && name[1] == '.')
3363 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3364 size_t size, gfp_t flags)
3366 if (time_to_inject(sbi, FAULT_KMALLOC))
3369 return kmalloc(size, flags);
3372 static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
3374 if (time_to_inject(sbi, FAULT_KMALLOC))
3380 static inline void f2fs_putname(char *buf)
3385 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3386 size_t size, gfp_t flags)
3388 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3391 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3392 size_t size, gfp_t flags)
3394 if (time_to_inject(sbi, FAULT_KVMALLOC))
3397 return kvmalloc(size, flags);
3400 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3401 size_t size, gfp_t flags)
3403 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3406 static inline int get_extra_isize(struct inode *inode)
3408 return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3411 static inline int get_inline_xattr_addrs(struct inode *inode)
3413 return F2FS_I(inode)->i_inline_xattr_size;
3416 #define f2fs_get_inode_mode(i) \
3417 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3418 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3420 #define F2FS_MIN_EXTRA_ATTR_SIZE (sizeof(__le32))
3422 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \
3423 (offsetof(struct f2fs_inode, i_extra_end) - \
3424 offsetof(struct f2fs_inode, i_extra_isize)) \
3426 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr))
3427 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
3428 ((offsetof(typeof(*(f2fs_inode)), field) + \
3429 sizeof((f2fs_inode)->field)) \
3430 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
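/*
 * Illustrative use of F2FS_FITS_IN_INODE() (a sketch; ri is a placeholder
 * for a struct f2fs_inode pointer): a field in the extra attribute area
 * is only valid when the on-disk i_extra_isize covers it, e.g.
 *
 *	if (F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 */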
3432 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
3434 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3436 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3437 block_t blkaddr, int type);
3438 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3439 block_t blkaddr, int type)
3441 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3442 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3444 f2fs_bug_on(sbi, 1);
3448 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3450 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3451 blkaddr == COMPRESS_ADDR)
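/*
 * NEW_ADDR, NULL_ADDR and COMPRESS_ADDR are reserved marker values rather
 * than real on-disk block addresses, which is why they are rejected here.
 */

/*
 * file.c
 */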
3459 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3460 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3461 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3462 int f2fs_truncate(struct inode *inode);
3463 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
3464 struct kstat *stat, u32 request_mask, unsigned int flags);
3465 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
3466 struct iattr *attr);
3467 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3468 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3469 int f2fs_precache_extents(struct inode *inode);
3470 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
3471 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3472 struct dentry *dentry, struct fileattr *fa);
3473 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3474 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3475 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3476 int f2fs_pin_file_control(struct inode *inode, bool inc);
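/*
 * inode.c
 */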
3481 void f2fs_set_inode_flags(struct inode *inode);
3482 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3483 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3484 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3485 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3486 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3487 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3488 void f2fs_update_inode_page(struct inode *inode);
3489 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3490 void f2fs_evict_inode(struct inode *inode);
3491 void f2fs_handle_failed_inode(struct inode *inode);
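/*
 * namei.c
 */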
3496 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3497 bool hot, bool set);
3498 struct dentry *f2fs_get_parent(struct dentry *child);
3499 int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3500 struct inode **new_inode);
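/*
 * dir.c
 */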
3505 int f2fs_init_casefolded_name(const struct inode *dir,
3506 struct f2fs_filename *fname);
3507 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3508 int lookup, struct f2fs_filename *fname);
3509 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3510 struct f2fs_filename *fname);
3511 void f2fs_free_filename(struct f2fs_filename *fname);
3512 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3513 const struct f2fs_filename *fname, int *max_slots);
3514 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3515 unsigned int start_pos, struct fscrypt_str *fstr);
3516 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3517 struct f2fs_dentry_ptr *d);
3518 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3519 const struct f2fs_filename *fname, struct page *dpage);
3520 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3521 unsigned int current_depth);
3522 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3523 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3524 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3525 const struct f2fs_filename *fname,
3526 struct page **res_page);
3527 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3528 const struct qstr *child, struct page **res_page);
3529 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3530 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3531 struct page **page);
3532 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3533 struct page *page, struct inode *inode);
3534 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3535 const struct f2fs_filename *fname);
3536 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3537 const struct fscrypt_str *name, f2fs_hash_t name_hash,
3538 unsigned int bit_pos);
3539 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3540 struct inode *inode, nid_t ino, umode_t mode);
3541 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3542 struct inode *inode, nid_t ino, umode_t mode);
3543 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3544 struct inode *inode, nid_t ino, umode_t mode);
3545 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3546 struct inode *dir, struct inode *inode);
3547 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3548 bool f2fs_empty_dir(struct inode *dir);
3550 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3552 if (fscrypt_is_nokey_name(dentry))
3554 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3555 inode, inode->i_ino, inode->i_mode);
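/*
 * super.c
 */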
3561 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3562 void f2fs_inode_synced(struct inode *inode);
3563 int f2fs_dquot_initialize(struct inode *inode);
3564 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3565 int f2fs_quota_sync(struct super_block *sb, int type);
3566 loff_t max_file_blocks(struct inode *inode);
3567 void f2fs_quota_off_umount(struct super_block *sb);
3568 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
3569 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
3571 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
3572 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
3573 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3574 int f2fs_sync_fs(struct super_block *sb, int sync);
3575 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
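/*
 * hash.c
 */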
3580 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
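/*
 * node.c
 */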
3587 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3588 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3589 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3590 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3591 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3592 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3593 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3594 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3595 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3596 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3597 struct node_info *ni, bool checkpoint_context);
3598 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3599 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3600 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3601 int f2fs_truncate_xattr_node(struct inode *inode);
3602 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3603 unsigned int seq_id);
3604 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
3605 int f2fs_remove_inode_page(struct inode *inode);
3606 struct page *f2fs_new_inode_page(struct inode *inode);
3607 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3608 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3609 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3610 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3611 int f2fs_move_node_page(struct page *node_page, int gc_type);
3612 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3613 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3614 struct writeback_control *wbc, bool atomic,
3615 unsigned int *seq_id);
3616 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3617 struct writeback_control *wbc,
3618 bool do_balance, enum iostat_type io_type);
3619 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3620 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3621 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3622 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3623 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3624 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3625 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3626 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3627 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3628 unsigned int segno, struct f2fs_summary_block *sum);
3629 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
3630 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3631 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3632 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3633 int __init f2fs_create_node_manager_caches(void);
3634 void f2fs_destroy_node_manager_caches(void);
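/*
 * segment.c
 */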
3639 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3640 int f2fs_commit_atomic_write(struct inode *inode);
3641 void f2fs_abort_atomic_write(struct inode *inode, bool clean);
3642 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3643 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3644 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3645 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3646 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3647 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3648 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3649 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3650 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
3651 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3652 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3653 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3654 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3655 struct cp_control *cpc);
3656 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3657 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3658 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3659 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3660 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3661 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3662 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3663 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3664 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3665 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3666 unsigned int *newseg, bool new_sec, int dir);
3667 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3668 unsigned int start, unsigned int end);
3669 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3670 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3671 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3672 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3673 struct cp_control *cpc);
3674 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3675 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3676 block_t blk_addr);
3677 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3678 enum iostat_type io_type);
3679 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3680 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3681 struct f2fs_io_info *fio);
3682 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3683 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3684 block_t old_blkaddr, block_t new_blkaddr,
3685 bool recover_curseg, bool recover_newaddr,
3686 bool from_gc);
3687 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3688 block_t old_addr, block_t new_addr,
3689 unsigned char version, bool recover_curseg,
3690 bool recover_newaddr);
3691 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3692 block_t old_blkaddr, block_t *new_blkaddr,
3693 struct f2fs_summary *sum, int type,
3694 struct f2fs_io_info *fio);
3695 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3696 block_t blkaddr, unsigned int blkcnt);
3697 void f2fs_wait_on_page_writeback(struct page *page,
3698 enum page_type type, bool ordered, bool locked);
3699 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3700 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3701 block_t len);
3702 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3703 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3704 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3705 unsigned int val, int alloc);
3706 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3707 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3708 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3709 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3710 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3711 int __init f2fs_create_segment_manager_caches(void);
3712 void f2fs_destroy_segment_manager_caches(void);
3713 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3714 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3715 unsigned int segno);
3716 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3717 unsigned int segno);
3719 #define DEF_FRAGMENT_SIZE 4
3720 #define MIN_FRAGMENT_SIZE 1
3721 #define MAX_FRAGMENT_SIZE 512
3723 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
3724 {
3725 return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
3726 F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
3727 }
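/*
 * The fragment modes tested above are debugging aids: with
 * "mode=fragment:segment" new segments are picked at random positions, and
 * with "mode=fragment:block" the allocator additionally scatters blocks,
 * leaving holes whose length is tunable roughly within MIN_FRAGMENT_SIZE..
 * MAX_FRAGMENT_SIZE blocks, to simulate an aged, fragmented volume; see
 * Documentation/filesystems/f2fs.rst for the exact semantics.
 */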
3732 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
3733 unsigned char reason);
3734 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
3735 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3736 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3737 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3738 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3739 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3740 block_t blkaddr, int type);
3741 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3742 int type, bool sync);
3743 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
3744 unsigned int ra_blocks);
3745 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3746 long nr_to_write, enum iostat_type io_type);
3747 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3748 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3749 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3750 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3751 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3752 unsigned int devidx, int type);
3753 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3754 unsigned int devidx, int type);
3755 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3756 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3757 void f2fs_add_orphan_inode(struct inode *inode);
3758 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3759 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3760 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3761 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
3762 void f2fs_remove_dirty_inode(struct inode *inode);
3763 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
3764 bool from_cp);
3765 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3766 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3767 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3768 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3769 int __init f2fs_create_checkpoint_caches(void);
3770 void f2fs_destroy_checkpoint_caches(void);
3771 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3772 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3773 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3774 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3779 int __init f2fs_init_bioset(void);
3780 void f2fs_destroy_bioset(void);
3781 int f2fs_init_bio_entry_cache(void);
3782 void f2fs_destroy_bio_entry_cache(void);
3783 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
3784 enum page_type type);
3785 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
3786 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3787 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3788 struct inode *inode, struct page *page,
3789 nid_t ino, enum page_type type);
3790 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3791 struct bio **bio, struct page *page);
3792 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3793 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3794 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3795 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3796 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3797 block_t blk_addr, sector_t *sector);
3798 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3799 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3800 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3801 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3802 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3803 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
3804 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3805 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3806 blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
3807 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
3808 pgoff_t *next_pgofs);
3809 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3810 bool for_write);
3811 struct page *f2fs_get_new_data_page(struct inode *inode,
3812 struct page *ipage, pgoff_t index, bool new_i_size);
3813 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3814 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
3815 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3816 u64 start, u64 len);
3817 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3818 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3819 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3820 int f2fs_write_single_data_page(struct page *page, int *submitted,
3821 struct bio **bio, sector_t *last_block,
3822 struct writeback_control *wbc,
3823 enum iostat_type io_type,
3824 int compr_blocks, bool allow_balance);
3825 void f2fs_write_failed(struct inode *inode, loff_t to);
3826 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
3827 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
3828 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3829 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3830 int f2fs_init_post_read_processing(void);
3831 void f2fs_destroy_post_read_processing(void);
3832 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3833 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3834 extern const struct iomap_ops f2fs_iomap_ops;
3839 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3840 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3841 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3842 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
3843 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3844 int f2fs_resize_fs(struct file *filp, __u64 block_count);
3845 int __init f2fs_create_garbage_collection_cache(void);
3846 void f2fs_destroy_garbage_collection_cache(void);
3847 /* victim selection function for cleaning and SSR */
3848 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
3849 int gc_type, int type, char alloc_mode,
3850 unsigned long long age);
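/*
 * Usage sketch (illustrative only; struct f2fs_gc_control is defined earlier
 * in this header, and the field set shown here is an assumption of this
 * example rather than a stable contract):
 *
 *	struct f2fs_gc_control gc_control = {
 *		.victim_segno = NULL_SEGNO,
 *		.init_gc_type = FG_GC,
 *		.no_bg_gc = false,
 *		.should_migrate_blocks = false,
 *		.err_gc_skipped = false,
 *		.nr_free_secs = 1,
 *	};
 *
 *	err = f2fs_gc(sbi, &gc_control);
 */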
3855 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3856 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3857 int __init f2fs_create_recovery_cache(void);
3858 void f2fs_destroy_recovery_cache(void);
3863 #ifdef CONFIG_F2FS_STAT_FS
3864 struct f2fs_stat_info {
3865 struct list_head stat_list;
3866 struct f2fs_sb_info *sbi;
3867 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3868 int main_area_segs, main_area_sections, main_area_zones;
3869 unsigned long long hit_cached[NR_EXTENT_CACHES];
3870 unsigned long long hit_rbtree[NR_EXTENT_CACHES];
3871 unsigned long long total_ext[NR_EXTENT_CACHES];
3872 unsigned long long hit_total[NR_EXTENT_CACHES];
3873 int ext_tree[NR_EXTENT_CACHES];
3874 int zombie_tree[NR_EXTENT_CACHES];
3875 int ext_node[NR_EXTENT_CACHES];
3876 /* to count memory footprint */
3877 unsigned long long ext_mem[NR_EXTENT_CACHES];
3878 /* for read extent cache */
3879 unsigned long long hit_largest;
3880 /* for block age extent cache */
3881 unsigned long long allocated_data_blocks;
3882 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3883 int ndirty_data, ndirty_qdata;
3884 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3885 int nats, dirty_nats, sits, dirty_sits;
3886 int free_nids, avail_nids, alloc_nids;
3887 int total_count, utilization;
3888 int bg_gc, nr_wb_cp_data, nr_wb_data;
3889 int nr_rd_data, nr_rd_node, nr_rd_meta;
3890 int nr_dio_read, nr_dio_write;
3891 unsigned int io_skip_bggc, other_skip_bggc;
3892 int nr_flushing, nr_flushed, flush_list_empty;
3893 int nr_discarding, nr_discarded;
3894 int nr_discard_cmd;
3895 unsigned int undiscard_blks;
3896 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3897 unsigned int cur_ckpt_time, peak_ckpt_time;
3898 int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3899 int compr_inode, swapfile_inode;
3900 unsigned long long compr_blocks;
3901 int aw_cnt, max_aw_cnt;
3902 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3903 unsigned int bimodal, avg_vblocks;
3904 int util_free, util_valid, util_invalid;
3905 int rsvd_segs, overp_segs;
3906 int dirty_count, node_pages, meta_pages, compress_pages;
3907 int compress_page_hit;
3908 int prefree_count, call_count, cp_count, bg_cp_count;
3909 int tot_segs, node_segs, data_segs, free_segs, free_secs;
3910 int bg_node_segs, bg_data_segs;
3911 int tot_blks, data_blks, node_blks;
3912 int bg_data_blks, bg_node_blks;
3913 int curseg[NR_CURSEG_TYPE];
3914 int cursec[NR_CURSEG_TYPE];
3915 int curzone[NR_CURSEG_TYPE];
3916 unsigned int dirty_seg[NR_CURSEG_TYPE];
3917 unsigned int full_seg[NR_CURSEG_TYPE];
3918 unsigned int valid_blks[NR_CURSEG_TYPE];
3920 unsigned int meta_count[META_MAX];
3921 unsigned int segment_count[2];
3922 unsigned int block_count[2];
3923 unsigned int inplace_count;
3924 unsigned long long base_mem, cache_mem, page_mem;
3925 };
3927 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3928 {
3929 return (struct f2fs_stat_info *)sbi->stat_info;
3930 }
3932 #define stat_inc_cp_count(si) ((si)->cp_count++)
3933 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
3934 #define stat_inc_call_count(si) ((si)->call_count++)
3935 #define stat_inc_bggc_count(si) ((si)->bg_gc++)
3936 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
3937 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
3938 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
3939 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
3940 #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type]))
3941 #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type]))
3942 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
3943 #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type]))
3944 #define stat_inc_inline_xattr(inode) \
3945 do { \
3946 if (f2fs_has_inline_xattr(inode)) \
3947 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
3948 } while (0)
3949 #define stat_dec_inline_xattr(inode) \
3950 do { \
3951 if (f2fs_has_inline_xattr(inode)) \
3952 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
3953 } while (0)
3954 #define stat_inc_inline_inode(inode) \
3955 do { \
3956 if (f2fs_has_inline_data(inode)) \
3957 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
3958 } while (0)
3959 #define stat_dec_inline_inode(inode) \
3960 do { \
3961 if (f2fs_has_inline_data(inode)) \
3962 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
3963 } while (0)
3964 #define stat_inc_inline_dir(inode) \
3965 do { \
3966 if (f2fs_has_inline_dentry(inode)) \
3967 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
3968 } while (0)
3969 #define stat_dec_inline_dir(inode) \
3970 do { \
3971 if (f2fs_has_inline_dentry(inode)) \
3972 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
3973 } while (0)
3974 #define stat_inc_compr_inode(inode) \
3975 do { \
3976 if (f2fs_compressed_file(inode)) \
3977 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
3978 } while (0)
3979 #define stat_dec_compr_inode(inode) \
3980 do { \
3981 if (f2fs_compressed_file(inode)) \
3982 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
3983 } while (0)
3984 #define stat_add_compr_blocks(inode, blocks) \
3985 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3986 #define stat_sub_compr_blocks(inode, blocks) \
3987 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3988 #define stat_inc_swapfile_inode(inode) \
3989 (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
3990 #define stat_dec_swapfile_inode(inode) \
3991 (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
3992 #define stat_inc_atomic_inode(inode) \
3993 (atomic_inc(&F2FS_I_SB(inode)->atomic_files))
3994 #define stat_dec_atomic_inode(inode) \
3995 (atomic_dec(&F2FS_I_SB(inode)->atomic_files))
3996 #define stat_inc_meta_count(sbi, blkaddr) \
3997 do { \
3998 if (blkaddr < SIT_I(sbi)->sit_base_addr) \
3999 atomic_inc(&(sbi)->meta_count[META_CP]); \
4000 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
4001 atomic_inc(&(sbi)->meta_count[META_SIT]); \
4002 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
4003 atomic_inc(&(sbi)->meta_count[META_NAT]); \
4004 else if (blkaddr < SM_I(sbi)->main_blkaddr) \
4005 atomic_inc(&(sbi)->meta_count[META_SSA]); \
4006 } while (0)
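/*
 * The cascade above relies on the fixed ordering of the metadata areas on
 * disk (CP < SIT < NAT < SSA < main area), so comparing blkaddr against each
 * area's start address is enough to classify the block being counted.
 */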
4007 #define stat_inc_seg_type(sbi, curseg) \
4008 ((sbi)->segment_count[(curseg)->alloc_type]++)
4009 #define stat_inc_block_count(sbi, curseg) \
4010 ((sbi)->block_count[(curseg)->alloc_type]++)
4011 #define stat_inc_inplace_blocks(sbi) \
4012 (atomic_inc(&(sbi)->inplace_count))
4013 #define stat_update_max_atomic_write(inode) \
4014 do { \
4015 int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
4016 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
4017 if (cur > max) \
4018 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
4019 } while (0)
4020 #define stat_inc_seg_count(sbi, type, gc_type) \
4021 do { \
4022 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
4023 si->tot_segs++; \
4024 if ((type) == SUM_TYPE_DATA) { \
4025 si->data_segs++; \
4026 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
4027 } else { \
4028 si->node_segs++; \
4029 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
4030 } \
4031 } while (0)
4033 #define stat_inc_tot_blk_count(si, blks) \
4034 ((si)->tot_blks += (blks))
4036 #define stat_inc_data_blk_count(sbi, blks, gc_type) \
4037 do { \
4038 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
4039 stat_inc_tot_blk_count(si, blks); \
4040 si->data_blks += (blks); \
4041 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
4042 } while (0)
4044 #define stat_inc_node_blk_count(sbi, blks, gc_type) \
4045 do { \
4046 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
4047 stat_inc_tot_blk_count(si, blks); \
4048 si->node_blks += (blks); \
4049 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
4050 } while (0)
4052 int f2fs_build_stats(struct f2fs_sb_info *sbi);
4053 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
4054 void __init f2fs_create_root_stats(void);
4055 void f2fs_destroy_root_stats(void);
4056 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
4057 #else
4058 #define stat_inc_cp_count(si) do { } while (0)
4059 #define stat_inc_bg_cp_count(si) do { } while (0)
4060 #define stat_inc_call_count(si) do { } while (0)
4061 #define stat_inc_bggc_count(si) do { } while (0)
4062 #define stat_io_skip_bggc_count(sbi) do { } while (0)
4063 #define stat_other_skip_bggc_count(sbi) do { } while (0)
4064 #define stat_inc_dirty_inode(sbi, type) do { } while (0)
4065 #define stat_dec_dirty_inode(sbi, type) do { } while (0)
4066 #define stat_inc_total_hit(sbi, type) do { } while (0)
4067 #define stat_inc_rbtree_node_hit(sbi, type) do { } while (0)
4068 #define stat_inc_largest_node_hit(sbi) do { } while (0)
4069 #define stat_inc_cached_node_hit(sbi, type) do { } while (0)
4070 #define stat_inc_inline_xattr(inode) do { } while (0)
4071 #define stat_dec_inline_xattr(inode) do { } while (0)
4072 #define stat_inc_inline_inode(inode) do { } while (0)
4073 #define stat_dec_inline_inode(inode) do { } while (0)
4074 #define stat_inc_inline_dir(inode) do { } while (0)
4075 #define stat_dec_inline_dir(inode) do { } while (0)
4076 #define stat_inc_compr_inode(inode) do { } while (0)
4077 #define stat_dec_compr_inode(inode) do { } while (0)
4078 #define stat_add_compr_blocks(inode, blocks) do { } while (0)
4079 #define stat_sub_compr_blocks(inode, blocks) do { } while (0)
4080 #define stat_inc_swapfile_inode(inode) do { } while (0)
4081 #define stat_dec_swapfile_inode(inode) do { } while (0)
4082 #define stat_inc_atomic_inode(inode) do { } while (0)
4083 #define stat_dec_atomic_inode(inode) do { } while (0)
4084 #define stat_update_max_atomic_write(inode) do { } while (0)
4085 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
4086 #define stat_inc_seg_type(sbi, curseg) do { } while (0)
4087 #define stat_inc_block_count(sbi, curseg) do { } while (0)
4088 #define stat_inc_inplace_blocks(sbi) do { } while (0)
4089 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
4090 #define stat_inc_tot_blk_count(si, blks) do { } while (0)
4091 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
4092 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
4094 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
4095 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
4096 static inline void __init f2fs_create_root_stats(void) { }
4097 static inline void f2fs_destroy_root_stats(void) { }
4098 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
4099 #endif
4101 extern const struct file_operations f2fs_dir_operations;
4102 extern const struct file_operations f2fs_file_operations;
4103 extern const struct inode_operations f2fs_file_inode_operations;
4104 extern const struct address_space_operations f2fs_dblock_aops;
4105 extern const struct address_space_operations f2fs_node_aops;
4106 extern const struct address_space_operations f2fs_meta_aops;
4107 extern const struct inode_operations f2fs_dir_inode_operations;
4108 extern const struct inode_operations f2fs_symlink_inode_operations;
4109 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
4110 extern const struct inode_operations f2fs_special_inode_operations;
4111 extern struct kmem_cache *f2fs_inode_entry_slab;
4116 bool f2fs_may_inline_data(struct inode *inode);
4117 bool f2fs_sanity_check_inline_data(struct inode *inode);
4118 bool f2fs_may_inline_dentry(struct inode *inode);
4119 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
4120 void f2fs_truncate_inline_inode(struct inode *inode,
4121 struct page *ipage, u64 from);
4122 int f2fs_read_inline_data(struct inode *inode, struct page *page);
4123 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
4124 int f2fs_convert_inline_inode(struct inode *inode);
4125 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
4126 int f2fs_write_inline_data(struct inode *inode, struct page *page);
4127 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
4128 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
4129 const struct f2fs_filename *fname,
4130 struct page **res_page);
4131 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
4132 struct page *ipage);
4133 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
4134 struct inode *inode, nid_t ino, umode_t mode);
4135 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
4136 struct page *page, struct inode *dir,
4137 struct inode *inode);
4138 bool f2fs_empty_inline_dir(struct inode *dir);
4139 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4140 struct fscrypt_str *fstr);
4141 int f2fs_inline_data_fiemap(struct inode *inode,
4142 struct fiemap_extent_info *fieinfo,
4143 __u64 start, __u64 len);
4148 unsigned long f2fs_shrink_count(struct shrinker *shrink,
4149 struct shrink_control *sc);
4150 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
4151 struct shrink_control *sc);
4152 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
4153 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
4158 bool sanity_check_extent_cache(struct inode *inode);
4159 void f2fs_init_extent_tree(struct inode *inode);
4160 void f2fs_drop_extent_tree(struct inode *inode);
4161 void f2fs_destroy_extent_node(struct inode *inode);
4162 void f2fs_destroy_extent_tree(struct inode *inode);
4163 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
4164 int __init f2fs_create_extent_cache(void);
4165 void f2fs_destroy_extent_cache(void);
4167 /* read extent cache ops */
4168 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
4169 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
4170 struct extent_info *ei);
4171 bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
4172 block_t *blkaddr);
4173 void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
4174 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
4175 pgoff_t fofs, block_t blkaddr, unsigned int len);
4176 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
4177 int nr_shrink);
4179 /* block age extent cache ops */
4180 void f2fs_init_age_extent_tree(struct inode *inode);
4181 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
4182 struct extent_info *ei);
4183 void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
4184 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
4185 pgoff_t fofs, unsigned int len);
4186 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
4187 int nr_shrink);
4192 #define MIN_RA_MUL 2
4193 #define MAX_RA_MUL 256
4195 int __init f2fs_init_sysfs(void);
4196 void f2fs_exit_sysfs(void);
4197 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4198 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4201 extern const struct fsverity_operations f2fs_verityops;
4206 static inline bool f2fs_encrypted_file(struct inode *inode)
4207 {
4208 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4209 }
4211 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4212 {
4213 #ifdef CONFIG_FS_ENCRYPTION
4214 file_set_encrypt(inode);
4215 f2fs_set_inode_flags(inode);
4216 #endif
4217 }
4219 /*
4220 * Returns true if the reads of the inode's data need to undergo some
4221 * postprocessing step, like decryption or authenticity verification.
4222 */
4223 static inline bool f2fs_post_read_required(struct inode *inode)
4224 {
4225 return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4226 f2fs_compressed_file(inode);
4227 }
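/*
 * Usage sketch (illustrative only): callers typically use this to decide
 * between a plain read path and one that goes through the post-read
 * processing machinery, e.g.
 *
 *	if (f2fs_post_read_required(inode))
 *		use_postprocessing_path();	// hypothetical helper
 *	else
 *		use_plain_read_path();		// hypothetical helper
 */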
4232 #ifdef CONFIG_F2FS_FS_COMPRESSION
4233 bool f2fs_is_compressed_page(struct page *page);
4234 struct page *f2fs_compress_control_page(struct page *page);
4235 int f2fs_prepare_compress_overwrite(struct inode *inode,
4236 struct page **pagep, pgoff_t index, void **fsdata);
4237 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4238 pgoff_t index, unsigned copied);
4239 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4240 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
4241 bool f2fs_is_compress_backend_ready(struct inode *inode);
4242 bool f2fs_is_compress_level_valid(int alg, int lvl);
4243 int __init f2fs_init_compress_mempool(void);
4244 void f2fs_destroy_compress_mempool(void);
4245 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
4246 void f2fs_end_read_compressed_page(struct page *page, bool failed,
4247 block_t blkaddr, bool in_task);
4248 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4249 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4250 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
4251 int index, int nr_pages, bool uptodate);
4252 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
4253 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
4254 int f2fs_write_multi_pages(struct compress_ctx *cc,
4255 int *submitted,
4256 struct writeback_control *wbc,
4257 enum iostat_type io_type);
4258 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4259 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
4260 pgoff_t fofs, block_t blkaddr,
4261 unsigned int llen, unsigned int c_len);
4262 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4263 unsigned nr_pages, sector_t *last_block_in_bio,
4264 bool is_readahead, bool for_write);
4265 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4266 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
4267 bool in_task);
4268 void f2fs_put_page_dic(struct page *page, bool in_task);
4269 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
4270 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4271 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4272 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4273 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4274 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4275 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4276 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4277 int __init f2fs_init_compress_cache(void);
4278 void f2fs_destroy_compress_cache(void);
4279 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4280 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
4281 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4282 nid_t ino, block_t blkaddr);
4283 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4284 block_t blkaddr);
4285 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4286 #define inc_compr_inode_stat(inode) \
4287 do { \
4288 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
4289 sbi->compr_new_inode++; \
4290 } while (0)
4291 #define add_compr_block_stat(inode, blocks) \
4292 do { \
4293 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
4294 int diff = F2FS_I(inode)->i_cluster_size - blocks; \
4295 sbi->compr_written_block += blocks; \
4296 sbi->compr_saved_block += diff; \
4297 } while (0)
4298 #else
4299 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
4300 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4301 {
4302 if (!f2fs_compressed_file(inode))
4303 return true;
4304 /* compression is not supported in this build */
4305 return false;
4306 }
4307 static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
4308 static inline struct page *f2fs_compress_control_page(struct page *page)
4309 {
4310 WARN_ON_ONCE(1);
4311 return ERR_PTR(-EINVAL);
4312 }
4313 static inline int __init f2fs_init_compress_mempool(void) { return 0; }
4314 static inline void f2fs_destroy_compress_mempool(void) { }
4315 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
4316 bool in_task) { }
4317 static inline void f2fs_end_read_compressed_page(struct page *page,
4318 bool failed, block_t blkaddr, bool in_task)
4319 {
4320 WARN_ON_ONCE(1);
4321 }
4322 static inline void f2fs_put_page_dic(struct page *page, bool in_task)
4323 {
4324 WARN_ON_ONCE(1);
4325 }
4326 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
4327 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
4328 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4329 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4330 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4331 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4332 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4333 static inline void f2fs_destroy_compress_cache(void) { }
4334 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4335 block_t blkaddr) { }
4336 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4337 struct page *page, nid_t ino, block_t blkaddr) { }
4338 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4339 struct page *page, block_t blkaddr) { return false; }
4340 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4341 nid_t ino) { }
4342 #define inc_compr_inode_stat(inode) do { } while (0)
4343 static inline void f2fs_update_read_extent_tree_range_compressed(
4344 struct inode *inode,
4345 pgoff_t fofs, block_t blkaddr,
4346 unsigned int llen, unsigned int c_len) { }
4347 #endif
4349 static inline int set_compress_context(struct inode *inode)
4350 {
4351 #ifdef CONFIG_F2FS_FS_COMPRESSION
4352 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4354 F2FS_I(inode)->i_compress_algorithm =
4355 F2FS_OPTION(sbi).compress_algorithm;
4356 F2FS_I(inode)->i_log_cluster_size =
4357 F2FS_OPTION(sbi).compress_log_size;
4358 F2FS_I(inode)->i_compress_flag =
4359 F2FS_OPTION(sbi).compress_chksum ?
4360 BIT(COMPRESS_CHKSUM) : 0;
4361 F2FS_I(inode)->i_cluster_size =
4362 BIT(F2FS_I(inode)->i_log_cluster_size);
4363 if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
4364 F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
4365 F2FS_OPTION(sbi).compress_level)
4366 F2FS_I(inode)->i_compress_level =
4367 F2FS_OPTION(sbi).compress_level;
4368 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
4369 set_inode_flag(inode, FI_COMPRESSED_FILE);
4370 stat_inc_compr_inode(inode);
4371 inc_compr_inode_stat(inode);
4372 f2fs_mark_inode_dirty_sync(inode, true);
4373 return 0;
4374 #else
4375 return -EOPNOTSUPP;
4376 #endif
4377 }
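/*
 * Example: with compress_log_size == 2, i_cluster_size becomes BIT(2) == 4
 * blocks (16KiB clusters on 4KiB blocks), and the COMPRESS_CHKSUM bit is set
 * in i_compress_flag only when the "compress_chksum" mount option was given.
 */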
4379 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4380 {
4381 struct f2fs_inode_info *fi = F2FS_I(inode);
4383 if (!f2fs_compressed_file(inode))
4384 return true;
4385 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
4386 return false;
4388 fi->i_flags &= ~F2FS_COMPR_FL;
4389 stat_dec_compr_inode(inode);
4390 clear_inode_flag(inode, FI_COMPRESSED_FILE);
4391 f2fs_mark_inode_dirty_sync(inode, true);
4392 return true;
4393 }
4395 #define F2FS_FEATURE_FUNCS(name, flagname) \
4396 static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4397 { \
4398 return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4399 }
4401 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4402 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4403 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4404 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4405 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4406 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4407 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4408 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4409 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4410 F2FS_FEATURE_FUNCS(verity, VERITY);
4411 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4412 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4413 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4414 F2FS_FEATURE_FUNCS(readonly, RO);
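/*
 * For reference, each F2FS_FEATURE_FUNCS() invocation above expands to a
 * helper of the form:
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */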
4416 #ifdef CONFIG_BLK_DEV_ZONED
4417 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4418 block_t blkaddr)
4419 {
4420 unsigned int zno = blkaddr / sbi->blocks_per_blkz;
4422 return test_bit(zno, FDEV(devi).blkz_seq);
4423 }
4424 #endif
4426 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4427 {
4428 return f2fs_sb_has_blkzoned(sbi);
4429 }
4431 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4432 {
4433 return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
4434 }
4436 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4437 {
4438 int i;
4440 if (!f2fs_is_multi_device(sbi))
4441 return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4443 for (i = 0; i < sbi->s_ndevs; i++)
4444 if (f2fs_bdev_support_discard(FDEV(i).bdev))
4445 return true;
4446 return false;
4447 }
4449 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4450 {
4451 return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4452 f2fs_hw_should_discard(sbi);
4453 }
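/*
 * In other words, runtime discard is enabled either explicitly (the
 * "discard" mount option on hardware that can discard) or implicitly on
 * zoned devices, where the discard path also drives sequential zone resets.
 */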
4455 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4456 {
4457 int i;
4459 if (!f2fs_is_multi_device(sbi))
4460 return bdev_read_only(sbi->sb->s_bdev);
4462 for (i = 0; i < sbi->s_ndevs; i++)
4463 if (bdev_read_only(FDEV(i).bdev))
4464 return true;
4465 return false;
4466 }
4468 static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
4469 {
4470 return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
4471 }
4473 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4474 {
4475 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4476 }
4478 static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
4479 {
4480 return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
4481 }
4483 static inline bool f2fs_may_compress(struct inode *inode)
4484 {
4485 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4486 f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
4487 return false;
4488 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4489 }
4491 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4492 u64 blocks, bool add)
4493 {
4494 struct f2fs_inode_info *fi = F2FS_I(inode);
4495 int diff = fi->i_cluster_size - blocks;
4497 /* don't update i_compr_blocks if saved blocks were released */
4498 if (!add && !atomic_read(&fi->i_compr_blocks))
4499 return;
4501 if (add) {
4502 atomic_add(diff, &fi->i_compr_blocks);
4503 stat_add_compr_blocks(inode, diff);
4504 } else {
4505 atomic_sub(diff, &fi->i_compr_blocks);
4506 stat_sub_compr_blocks(inode, diff);
4507 }
4508 f2fs_mark_inode_dirty_sync(inode, true);
4509 }
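/*
 * Example: for an 8-block cluster that compressed down to 5 blocks,
 * diff == 3, so three "saved" blocks are added to (or subtracted from)
 * i_compr_blocks and the per-sb compressed block statistics.
 */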
4511 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
4512 int flag)
4513 {
4514 if (!f2fs_is_multi_device(sbi))
4515 return false;
4516 if (flag != F2FS_GET_BLOCK_DIO)
4517 return false;
4518 return sbi->aligned_blksize;
4519 }
4521 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4522 {
4523 return fsverity_active(inode) &&
4524 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4525 }
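/*
 * Example: with i_size == 5000 and 4KiB pages, DIV_ROUND_UP(5000, PAGE_SIZE)
 * is 2, so only page indexes 0 and 1 are subject to fsverity verification;
 * pages beyond EOF are not.
 */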
4527 #ifdef CONFIG_F2FS_FAULT_INJECTION
4528 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4529 unsigned int type);
4530 #else
4531 #define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
4532 #endif
4534 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4535 {
4536 #ifdef CONFIG_QUOTA
4537 if (f2fs_sb_has_quota_ino(sbi))
4538 return true;
4539 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4540 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4541 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4542 return true;
4543 #endif
4544 return false;
4545 }
4547 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
4548 {
4549 return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
4550 }
4552 static inline void f2fs_io_schedule_timeout(long timeout)
4553 {
4554 set_current_state(TASK_UNINTERRUPTIBLE);
4555 io_schedule_timeout(timeout);
4556 }
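/*
 * f2fs_handle_page_eio() below tracks repeated -EIO completions at the same
 * page offset: once the same offset of a given page type keeps failing past
 * MAX_RETRY_PAGE_EIO attempts, CP_ERROR_FLAG is set on the checkpoint, while
 * an error at a different offset simply restarts the counter.
 */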
4558 static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
4559 enum page_type type)
4560 {
4561 if (unlikely(f2fs_cp_error(sbi)))
4562 return;
4564 if (ofs == sbi->page_eio_ofs[type]) {
4565 if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
4566 set_ckpt_flags(sbi, CP_ERROR_FLAG);
4567 } else {
4568 sbi->page_eio_ofs[type] = ofs;
4569 sbi->page_eio_cnt[type] = 0;
4570 }
4571 }
4573 static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
4574 {
4575 return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
4576 }
4578 #define EFSBADCRC EBADMSG /* Bad CRC detected */
4579 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
4581 #endif /* _LINUX_F2FS_H */