#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/fscrypto.h>
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#include <linux/fscrypt_supp.h>
+#else
+#include <linux/fscrypt_notsupp.h>
+#endif
#include <crypto/hash.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_SET_FEATURE(sb, mask) \
- F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask)
+ (F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sb, mask) \
- F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)
+ (F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
/*
* For checkpoint manager
CP_DISCARD,
};
- #define DEF_BATCHED_TRIM_SECTIONS 2
+ #define DEF_BATCHED_TRIM_SECTIONS 2048
#define BATCHED_TRIM_SEGMENTS(sbi) \
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
+ #define MAX_DISCARD_BLOCKS(sbi) \
+ ((1 << (sbi)->log_blocks_per_seg) * (sbi)->segs_per_sec)
+ #define DISCARD_ISSUE_RATE 8
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
int len; /* # of consecutive blocks of the discard */
};
- struct bio_entry {
- struct list_head list;
- struct bio *bio;
- struct completion event;
- int error;
+ enum {
+ D_PREP,
+ D_SUBMIT,
+ D_DONE,
+ };
+
+ struct discard_cmd {
+ struct list_head list; /* command list */
+ struct completion wait; /* completion */
+ block_t lstart; /* logical start address */
+ block_t len; /* length */
+ struct bio *bio; /* bio */
+ int state; /* state */
+ };
+
+ struct discard_cmd_control {
+ struct task_struct *f2fs_issue_discard; /* discard thread */
+ struct list_head discard_entry_list; /* 4KB discard entry list */
+ int nr_discards; /* # of discards in the list */
+ struct list_head discard_cmd_list; /* discard cmd list */
+ wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
+ struct mutex cmd_lock;
+ int max_discards; /* max. discards to be issued */
+ atomic_t submit_discard; /* # of issued discard */
};
/* for the list of fsync inodes, used only during recovery */
static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
int before = nats_in_cursum(journal);
+
journal->n_nats = cpu_to_le16(before + i);
return before;
}
static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
int before = sits_in_cursum(journal);
+
journal->n_sits = cpu_to_le16(before + i);
return before;
}
if (type == 1) {
struct f2fs_dentry_block *t = (struct f2fs_dentry_block *)src;
+
d->max = NR_DENTRY_IN_BLOCK;
d->bitmap = &t->dentry_bitmap;
d->dentry = t->dentry;
d->filename = t->filename;
} else {
struct f2fs_inline_dentry *t = (struct f2fs_inline_dentry *)src;
+
d->max = NR_INLINE_DENTRY;
d->bitmap = &t->dentry_bitmap;
d->dentry = t->dentry;
atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
+ struct task_struct *task; /* lookup and create consistency */
nid_t i_xattr_nid; /* node id that contains xattrs */
- unsigned long long xattr_ver; /* cp version of xattr modification */
loff_t last_disk_size; /* lastly written file size */
struct list_head dirty_list; /* dirty list for dirs and files */
ei->len = len;
}
- static inline bool __is_extent_same(struct extent_info *ei1,
- struct extent_info *ei2)
- {
- return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk &&
- ei1->len == ei2->len);
- }
-
static inline bool __is_extent_mergeable(struct extent_info *back,
struct extent_info *front)
{
return __is_extent_mergeable(cur, front);
}
- extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
+ extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct inode *inode,
struct extent_tree *et, struct extent_node *en)
{
struct list_head nat_entries; /* cached nat entry list (clean) */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
+ unsigned int nat_blocks; /* # of nat blocks */
/* free node ids management */
struct radix_tree_root free_nid_root;/* root of the free_nid cache */
unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */
spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
+ unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
+ unsigned char *nat_block_bitmap;
/* for checkpoint */
char *nat_bitmap; /* NAT bitmap pointer */
+
+ unsigned int nat_bits_blocks; /* # of nat bits blocks */
+ unsigned char *nat_bits; /* NAT bits blocks */
+ unsigned char *full_nat_bits; /* full NAT pages */
+ unsigned char *empty_nat_bits; /* empty NAT pages */
+ #ifdef CONFIG_F2FS_CHECK_FS
+ char *nat_bitmap_mir; /* NAT bitmap mirror */
+ #endif
int bitmap_size; /* bitmap size */
};
/* a threshold to reclaim prefree segments */
unsigned int rec_prefree_segments;
- /* for small discard management */
- struct list_head discard_list; /* 4KB discard list */
- struct list_head wait_list; /* linked with issued discard bio */
- int nr_discards; /* # of discards in the list */
- int max_discards; /* max. discards to be issued */
-
/* for batched trimming */
unsigned int trim_sections; /* # of sections to trim */
unsigned int min_fsync_blocks; /* threshold for fsync */
/* for flush command control */
- struct flush_cmd_control *cmd_control_info;
+ struct flush_cmd_control *fcc_info;
+ /* for discard command control */
+ struct discard_cmd_control *dcc_info;
};
/*
block_t old_blkaddr; /* old block address before Cow */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
+ bool submitted; /* indicate IO submission */
};
#define is_read_io(rw) (rw == READ)
MAX_TIME,
};
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
-#define F2FS_KEY_DESC_PREFIX "f2fs:"
-#define F2FS_KEY_DESC_PREFIX_SIZE 5
-#endif
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
int valid_super_block; /* valid super block no */
unsigned long s_flag; /* flags for sbi */
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
- u8 key_prefix_size;
-#endif
-
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
struct f2fs_bio_info read_io; /* for read bios */
struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */
struct mutex wio_mutex[NODE + 1]; /* bio ordering for NODE/DATA */
+ int write_io_size_bits; /* Write IO size bits */
+ mempool_t *write_io_dummy; /* Dummy pages */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
/* for extent tree cache */
struct radix_tree_root extent_tree_root;/* cache extent cache entries */
- struct rw_semaphore extent_tree_lock; /* locking extent radix tree */
+ struct mutex extent_tree_lock; /* locking extent radix tree */
struct list_head extent_list; /* lru list for shrinker */
spinlock_t extent_lock; /* locking extent lru list */
atomic_t total_ext_tree; /* extent tree count */
struct f2fs_gc_kthread *gc_thread; /* GC thread */
unsigned int cur_victim_sec; /* current victim section num */
+ /* threshold for converting bg victims for fg */
+ u64 fggc_threshold;
+
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
atomic_t inline_xattr; /* # of inline_xattr inodes */
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
+ atomic_t aw_cnt; /* # of atomic writes */
+ atomic_t max_aw_cnt; /* max # of atomic writes */
int bg_gc; /* background gc calls */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
+ #define f2fs_show_injection_info(type) \
+ printk("%sF2FS-fs : inject %s in %s of %pF\n", \
+ KERN_INFO, fault_name[type], \
+ __func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
struct f2fs_fault_info *ffi = &sbi->fault_info;
atomic_inc(&ffi->inject_ops);
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
atomic_set(&ffi->inject_ops, 0);
- printk("%sF2FS-fs : inject %s in %pF\n",
- KERN_INFO,
- fault_name[type],
- __builtin_return_address(0));
return true;
}
return false;
return le64_to_cpu(cp->checkpoint_ver);
}
+ static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
+ {
+ size_t crc_offset = le32_to_cpu(cp->checksum_offset);
+ return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
+ }
+
static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
spin_unlock(&sbi->cp_lock);
}
+ static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
+ {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+
+ if (lock)
+ spin_lock(&sbi->cp_lock);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
+ kfree(NM_I(sbi)->nat_bits);
+ NM_I(sbi)->nat_bits = NULL;
+ if (lock)
+ spin_unlock(&sbi->cp_lock);
+ }
+
+ static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc)
+ {
+ bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+
+ return (cpc) ? (cpc->reason == CP_UMOUNT) && set : set;
+ }
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
blkcnt_t diff;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(sbi, FAULT_BLOCK))
+ if (time_to_inject(sbi, FAULT_BLOCK)) {
+ f2fs_show_injection_info(FAULT_BLOCK);
return false;
+ }
#endif
/*
* let's increase this in prior to actual block count change in order
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct page *page = find_lock_page(mapping, index);
+
if (page)
return page;
- if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+ f2fs_show_injection_info(FAULT_PAGE_ALLOC);
return NULL;
+ }
#endif
if (!for_write)
return grab_cache_page(mapping, index);
static inline bool IS_INODE(struct page *page)
{
struct f2fs_node *p = F2FS_NODE(page);
+
return RAW_IS_INODE(p);
}
{
struct f2fs_node *raw_node;
__le32 *addr_array;
+
raw_node = F2FS_NODE(node_page);
addr_array = blkaddr_in_node(raw_node);
return le32_to_cpu(addr_array[offset]);
FI_UPDATE_WRITE, /* inode has in-place-update data */
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_ATOMIC_COMMIT, /* indicate the state of atomic committing */
FI_VOLATILE_FILE, /* indicate volatile file */
FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
FI_DROP_CACHE, /* drop dirty page cache */
FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_DO_DEFRAG, /* indicate defragment is running */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
+ FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
static inline void *inline_xattr_addr(struct page *page)
{
struct f2fs_inode *ri = F2FS_INODE(page);
+
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
F2FS_INLINE_XATTR_ADDRS]);
}
return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
+ static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
+ {
+ return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
+ }
+
static inline bool f2fs_is_volatile_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_VOLATILE_FILE);
static inline void *inline_data_addr(struct page *page)
{
struct f2fs_inode *ri = F2FS_INODE(page);
+
return (void *)&(ri->i_addr[1]);
}
size_t size, gfp_t flags)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(sbi, FAULT_KMALLOC))
+ if (time_to_inject(sbi, FAULT_KMALLOC)) {
+ f2fs_show_injection_info(FAULT_KMALLOC);
return NULL;
+ }
#endif
return kmalloc(size, flags);
}
/*
* file.c
*/
- int f2fs_sync_file(struct file *, loff_t, loff_t, int);
- void truncate_data_blocks(struct dnode_of_data *);
- int truncate_blocks(struct inode *, u64, bool);
- int f2fs_truncate(struct inode *);
- int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
- int f2fs_setattr(struct dentry *, struct iattr *);
- int truncate_hole(struct inode *, pgoff_t, pgoff_t);
- int truncate_data_blocks_range(struct dnode_of_data *, int);
- long f2fs_ioctl(struct file *, unsigned int, unsigned long);
- long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
+ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
+ void truncate_data_blocks(struct dnode_of_data *dn);
+ int truncate_blocks(struct inode *inode, u64 from, bool lock);
+ int f2fs_truncate(struct inode *inode);
+ int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
+ int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
+ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
+ int truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/*
* inode.c
*/
- void f2fs_set_inode_flags(struct inode *);
- struct inode *f2fs_iget(struct super_block *, unsigned long);
- struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
- int try_to_free_nats(struct f2fs_sb_info *, int);
- int update_inode(struct inode *, struct page *);
- int update_inode_page(struct inode *);
- int f2fs_write_inode(struct inode *, struct writeback_control *);
- void f2fs_evict_inode(struct inode *);
- void handle_failed_inode(struct inode *);
+ void f2fs_set_inode_flags(struct inode *inode);
+ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
+ struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
+ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
+ int update_inode(struct inode *inode, struct page *node_page);
+ int update_inode_page(struct inode *inode);
+ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
+ void f2fs_evict_inode(struct inode *inode);
+ void handle_failed_inode(struct inode *inode);
/*
* namei.c
/*
* dir.c
*/
- void set_de_type(struct f2fs_dir_entry *, umode_t);
- unsigned char get_de_type(struct f2fs_dir_entry *);
- struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
- f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
- int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
- unsigned int, struct fscrypt_str *);
- void do_make_empty_dir(struct inode *, struct inode *,
- struct f2fs_dentry_ptr *);
- struct page *init_inode_metadata(struct inode *, struct inode *,
- const struct qstr *, const struct qstr *, struct page *);
- void update_parent_metadata(struct inode *, struct inode *, unsigned int);
- int room_for_filename(const void *, int, int);
- void f2fs_drop_nlink(struct inode *, struct inode *);
- struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
- struct page **);
- struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
- struct page **);
- struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
- ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **);
- void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
- struct page *, struct inode *);
- int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
- void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
- const struct qstr *, f2fs_hash_t , unsigned int);
- int f2fs_add_regular_entry(struct inode *, const struct qstr *,
- const struct qstr *, struct inode *, nid_t, umode_t);
- int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *,
- nid_t, umode_t);
- int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
- umode_t);
- void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
- struct inode *);
- int f2fs_do_tmpfile(struct inode *, struct inode *);
- bool f2fs_empty_dir(struct inode *);
+ void set_de_type(struct f2fs_dir_entry *de, umode_t mode);
+ unsigned char get_de_type(struct f2fs_dir_entry *de);
+ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
+ f2fs_hash_t namehash, int *max_slots,
+ struct f2fs_dentry_ptr *d);
+ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ unsigned int start_pos, struct fscrypt_str *fstr);
+ void do_make_empty_dir(struct inode *inode, struct inode *parent,
+ struct f2fs_dentry_ptr *d);
+ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct qstr *new_name,
+ const struct qstr *orig_name, struct page *dpage);
+ void update_parent_metadata(struct inode *dir, struct inode *inode,
+ unsigned int current_depth);
+ int room_for_filename(const void *bitmap, int slots, int max_slots);
+ void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
+ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page);
+ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ const struct qstr *child, struct page **res_page);
+ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
+ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
+ struct page **page);
+ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
+ struct page *page, struct inode *inode);
+ int update_dent_inode(struct inode *inode, struct inode *to,
+ const struct qstr *name);
+ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
+ const struct qstr *name, f2fs_hash_t name_hash,
+ unsigned int bit_pos);
+ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode);
+ int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+ struct inode *inode, nid_t ino, umode_t mode);
+ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode);
+ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ struct inode *dir, struct inode *inode);
+ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
+ bool f2fs_empty_dir(struct inode *dir);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
/*
* super.c
*/
- int f2fs_inode_dirtied(struct inode *, bool);
- void f2fs_inode_synced(struct inode *);
- int f2fs_commit_super(struct f2fs_sb_info *, bool);
- int f2fs_sync_fs(struct super_block *, int);
+ int f2fs_inode_dirtied(struct inode *inode, bool sync);
+ void f2fs_inode_synced(struct inode *inode);
+ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
+ int f2fs_sync_fs(struct super_block *sb, int sync);
extern __printf(3, 4)
- void f2fs_msg(struct super_block *, const char *, const char *, ...);
+ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
int sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
- f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
+ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info);
/*
* node.c
struct dnode_of_data;
struct node_info;
- bool available_free_memory(struct f2fs_sb_info *, int);
- int need_dentry_mark(struct f2fs_sb_info *, nid_t);
- bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
- bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
- void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
- pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
- int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
- int truncate_inode_blocks(struct inode *, pgoff_t);
- int truncate_xattr_node(struct inode *, struct page *);
- int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
- int remove_inode_page(struct inode *);
- struct page *new_inode_page(struct inode *);
- struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
- void ra_node_page(struct f2fs_sb_info *, nid_t);
- struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
- struct page *get_node_page_ra(struct page *, int);
- void move_node_page(struct page *, int);
- int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
- struct writeback_control *, bool);
- int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
- void build_free_nids(struct f2fs_sb_info *, bool);
- bool alloc_nid(struct f2fs_sb_info *, nid_t *);
- void alloc_nid_done(struct f2fs_sb_info *, nid_t);
- void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
- int try_to_free_nids(struct f2fs_sb_info *, int);
- void recover_inline_xattr(struct inode *, struct page *);
- void recover_xattr_data(struct inode *, struct page *, block_t);
- int recover_inode_page(struct f2fs_sb_info *, struct page *);
- int restore_node_summary(struct f2fs_sb_info *, unsigned int,
- struct f2fs_summary_block *);
- void flush_nat_entries(struct f2fs_sb_info *);
- int build_node_manager(struct f2fs_sb_info *);
- void destroy_node_manager(struct f2fs_sb_info *);
+ bool available_free_memory(struct f2fs_sb_info *sbi, int type);
+ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
+ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
+ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
+ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni);
+ pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
+ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
+ int truncate_inode_blocks(struct inode *inode, pgoff_t from);
+ int truncate_xattr_node(struct inode *inode, struct page *page);
+ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+ int remove_inode_page(struct inode *inode);
+ struct page *new_inode_page(struct inode *inode);
+ struct page *new_node_page(struct dnode_of_data *dn,
+ unsigned int ofs, struct page *ipage);
+ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
+ struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
+ struct page *get_node_page_ra(struct page *parent, int start);
+ void move_node_page(struct page *node_page, int gc_type);
+ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct writeback_control *wbc, bool atomic);
+ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc);
+ void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
+ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
+ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
+ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
+ void recover_inline_xattr(struct inode *inode, struct page *page);
+ int recover_xattr_data(struct inode *inode, struct page *page,
+ block_t blkaddr);
+ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+ int restore_node_summary(struct f2fs_sb_info *sbi,
+ unsigned int segno, struct f2fs_summary_block *sum);
+ void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+ int build_node_manager(struct f2fs_sb_info *sbi);
+ void destroy_node_manager(struct f2fs_sb_info *sbi);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);
/*
* segment.c
*/
- void register_inmem_page(struct inode *, struct page *);
- void drop_inmem_pages(struct inode *);
- int commit_inmem_pages(struct inode *);
- void f2fs_balance_fs(struct f2fs_sb_info *, bool);
- void f2fs_balance_fs_bg(struct f2fs_sb_info *);
- int f2fs_issue_flush(struct f2fs_sb_info *);
- int create_flush_cmd_control(struct f2fs_sb_info *);
- void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
- void invalidate_blocks(struct f2fs_sb_info *, block_t);
- bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
- void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
- void f2fs_wait_all_discard_bio(struct f2fs_sb_info *);
- void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
- void release_discard_addrs(struct f2fs_sb_info *);
- int npages_for_summary_flush(struct f2fs_sb_info *, bool);
- void allocate_new_segments(struct f2fs_sb_info *);
- int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
- struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
- void update_meta_page(struct f2fs_sb_info *, void *, block_t);
- void write_meta_page(struct f2fs_sb_info *, struct page *);
- void write_node_page(unsigned int, struct f2fs_io_info *);
- void write_data_page(struct dnode_of_data *, struct f2fs_io_info *);
- void rewrite_data_page(struct f2fs_io_info *);
- void __f2fs_replace_block(struct f2fs_sb_info *, struct f2fs_summary *,
- block_t, block_t, bool, bool);
- void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
- block_t, block_t, unsigned char, bool, bool);
- void allocate_data_block(struct f2fs_sb_info *, struct page *,
- block_t, block_t *, struct f2fs_summary *, int);
- void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
- void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
- void write_data_summaries(struct f2fs_sb_info *, block_t);
- void write_node_summaries(struct f2fs_sb_info *, block_t);
- int lookup_journal_in_cursum(struct f2fs_journal *, int, unsigned int, int);
- void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *);
- int build_segment_manager(struct f2fs_sb_info *);
- void destroy_segment_manager(struct f2fs_sb_info *);
+ void register_inmem_page(struct inode *inode, struct page *page);
+ void drop_inmem_pages(struct inode *inode);
+ int commit_inmem_pages(struct inode *inode);
+ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
+ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
+ int f2fs_issue_flush(struct f2fs_sb_info *sbi);
+ int create_flush_cmd_control(struct f2fs_sb_info *sbi);
+ void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
+ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
+ void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
+ void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr);
+ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+ void release_discard_addrs(struct f2fs_sb_info *sbi);
+ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
+ void allocate_new_segments(struct f2fs_sb_info *sbi);
+ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
+ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
+ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
+ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page);
+ void write_node_page(unsigned int nid, struct f2fs_io_info *fio);
+ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio);
+ void rewrite_data_page(struct f2fs_io_info *fio);
+ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ block_t old_blkaddr, block_t new_blkaddr,
+ bool recover_curseg, bool recover_newaddr);
+ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
+ block_t old_addr, block_t new_addr,
+ unsigned char version, bool recover_curseg,
+ bool recover_newaddr);
+ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ block_t old_blkaddr, block_t *new_blkaddr,
+ struct f2fs_summary *sum, int type);
+ void f2fs_wait_on_page_writeback(struct page *page,
+ enum page_type type, bool ordered);
+ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
+ block_t blkaddr);
+ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
+ void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
+ int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+ unsigned int val, int alloc);
+ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+ int build_segment_manager(struct f2fs_sb_info *sbi);
+ void destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);
/*
* checkpoint.c
*/
- void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
- struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
- struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
- struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
- bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
- int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
- void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
- long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
- void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
- void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
- void release_ino_entry(struct f2fs_sb_info *, bool);
- bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
- int f2fs_sync_inode_meta(struct f2fs_sb_info *);
- int acquire_orphan_inode(struct f2fs_sb_info *);
- void release_orphan_inode(struct f2fs_sb_info *);
- void add_orphan_inode(struct inode *);
- void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
- int recover_orphan_inodes(struct f2fs_sb_info *);
- int get_valid_checkpoint(struct f2fs_sb_info *);
- void update_dirty_page(struct inode *, struct page *);
- void remove_dirty_inode(struct inode *);
- int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type);
- int write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
- void init_ino_entry_info(struct f2fs_sb_info *);
+ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
+ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type);
+ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ int type, bool sync);
+ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
+ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
+ long nr_to_write);
+ void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
+ void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
+ void release_ino_entry(struct f2fs_sb_info *sbi, bool all);
+ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
+ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
+ int acquire_orphan_inode(struct f2fs_sb_info *sbi);
+ void release_orphan_inode(struct f2fs_sb_info *sbi);
+ void add_orphan_inode(struct inode *inode);
+ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
+ int recover_orphan_inodes(struct f2fs_sb_info *sbi);
+ int get_valid_checkpoint(struct f2fs_sb_info *sbi);
+ void update_dirty_page(struct inode *inode, struct page *page);
+ void remove_dirty_inode(struct inode *inode);
+ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+ void init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
/*
* data.c
*/
- void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
- void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
- struct page *, nid_t, enum page_type, int);
- void f2fs_flush_merged_bios(struct f2fs_sb_info *);
- int f2fs_submit_page_bio(struct f2fs_io_info *);
- void f2fs_submit_page_mbio(struct f2fs_io_info *);
- struct block_device *f2fs_target_device(struct f2fs_sb_info *,
- block_t, struct bio *);
- int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
- void set_data_blkaddr(struct dnode_of_data *);
- void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
- int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
- int reserve_new_block(struct dnode_of_data *);
- int f2fs_get_block(struct dnode_of_data *, pgoff_t);
- int f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
- int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
- struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
- struct page *find_data_page(struct inode *, pgoff_t);
- struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
- struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
- int do_write_data_page(struct f2fs_io_info *);
- int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int);
- int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
- void f2fs_set_page_dirty_nobuffers(struct page *);
- void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
- int f2fs_release_page(struct page *, gfp_t);
+ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+ int rw);
+ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, nid_t ino, pgoff_t idx,
+ enum page_type type, int rw);
+ void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi);
+ int f2fs_submit_page_bio(struct f2fs_io_info *fio);
+ int f2fs_submit_page_mbio(struct f2fs_io_info *fio);
+ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio);
+ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
+ void set_data_blkaddr(struct dnode_of_data *dn);
+ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
+ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
+ int reserve_new_block(struct dnode_of_data *dn);
+ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
+ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
+ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
+ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+ int op_flags, bool for_write);
+ struct page *find_data_page(struct inode *inode, pgoff_t index);
+ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+ bool for_write);
+ struct page *get_new_data_page(struct inode *inode,
+ struct page *ipage, pgoff_t index, bool new_i_size);
+ int do_write_data_page(struct f2fs_io_info *fio);
+ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ int create, int flag);
+ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
+ void f2fs_set_page_dirty_nobuffers(struct page *page);
+ void f2fs_invalidate_page(struct page *page, unsigned int offset,
+ unsigned int length);
+ int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
- int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
- enum migrate_mode);
+ int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
#endif
/*
* gc.c
*/
- int start_gc_thread(struct f2fs_sb_info *);
- void stop_gc_thread(struct f2fs_sb_info *);
- block_t start_bidx_of_node(unsigned int, struct inode *);
- int f2fs_gc(struct f2fs_sb_info *, bool, bool);
- void build_gc_manager(struct f2fs_sb_info *);
+ int start_gc_thread(struct f2fs_sb_info *sbi);
+ void stop_gc_thread(struct f2fs_sb_info *sbi);
+ block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background);
+ void build_gc_manager(struct f2fs_sb_info *sbi);
/*
* recovery.c
*/
- int recover_fsync_data(struct f2fs_sb_info *, bool);
- bool space_for_roll_forward(struct f2fs_sb_info *);
+ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
+ bool space_for_roll_forward(struct f2fs_sb_info *sbi);
/*
* debug.c
unsigned int ndirty_dirs, ndirty_files, ndirty_all;
int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, nr_wb_cp_data, nr_wb_data;
- int inline_xattr, inline_inode, inline_dir, orphans;
+ int bg_gc, nr_wb_cp_data, nr_wb_data, nr_flush, nr_discard;
+ int inline_xattr, inline_inode, inline_dir, append, update, orphans;
+ int aw_cnt, max_aw_cnt;
unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
(atomic_inc(&(sbi)->inplace_count))
+ #define stat_inc_atomic_write(inode) \
+ (atomic_inc(&F2FS_I_SB(inode)->aw_cnt))
+ #define stat_dec_atomic_write(inode) \
+ (atomic_dec(&F2FS_I_SB(inode)->aw_cnt))
+ #define stat_update_max_atomic_write(inode) \
+ do { \
+ int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt); \
+ int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
+ if (cur > max) \
+ atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
+ } while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0; \
} while (0)
- int f2fs_build_stats(struct f2fs_sb_info *);
- void f2fs_destroy_stats(struct f2fs_sb_info *);
+ int f2fs_build_stats(struct f2fs_sb_info *sbi);
+ void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
int __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_dec_inline_inode(inode)
#define stat_inc_inline_dir(inode)
#define stat_dec_inline_dir(inode)
+ #define stat_inc_atomic_write(inode)
+ #define stat_dec_atomic_write(inode)
+ #define stat_update_max_atomic_write(inode)
#define stat_inc_seg_type(sbi, curseg)
#define stat_inc_block_count(sbi, curseg)
#define stat_inc_inplace_blocks(sbi)
/*
* inline.c
*/
- bool f2fs_may_inline_data(struct inode *);
- bool f2fs_may_inline_dentry(struct inode *);
- void read_inline_data(struct page *, struct page *);
- bool truncate_inline_inode(struct page *, u64);
- int f2fs_read_inline_data(struct inode *, struct page *);
- int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
- int f2fs_convert_inline_inode(struct inode *);
- int f2fs_write_inline_data(struct inode *, struct page *);
- bool recover_inline_data(struct inode *, struct page *);
- struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
- struct fscrypt_name *, struct page **);
- int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
- int f2fs_add_inline_entry(struct inode *, const struct qstr *,
- const struct qstr *, struct inode *, nid_t, umode_t);
- void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
- struct inode *, struct inode *);
- bool f2fs_empty_inline_dir(struct inode *);
- int f2fs_read_inline_dir(struct file *, struct dir_context *,
- struct fscrypt_str *);
- int f2fs_inline_data_fiemap(struct inode *,
- struct fiemap_extent_info *, __u64, __u64);
+ bool f2fs_may_inline_data(struct inode *inode);
+ bool f2fs_may_inline_dentry(struct inode *inode);
+ void read_inline_data(struct page *page, struct page *ipage);
+ bool truncate_inline_inode(struct page *ipage, u64 from);
+ int f2fs_read_inline_data(struct inode *inode, struct page *page);
+ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
+ int f2fs_convert_inline_inode(struct inode *inode);
+ int f2fs_write_inline_data(struct inode *inode, struct page *page);
+ bool recover_inline_data(struct inode *inode, struct page *npage);
+ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page);
+ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
+ struct page *ipage);
+ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode);
+ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ struct inode *dir, struct inode *inode);
+ bool f2fs_empty_inline_dir(struct inode *dir);
+ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
+ struct fscrypt_str *fstr);
+ int f2fs_inline_data_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len);
/*
* shrinker.c
*/
- unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *);
- unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
- void f2fs_join_shrinker(struct f2fs_sb_info *);
- void f2fs_leave_shrinker(struct f2fs_sb_info *);
+ unsigned long f2fs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc);
+ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc);
+ void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
+ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
- unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
- bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
- void f2fs_drop_extent_tree(struct inode *);
- unsigned int f2fs_destroy_extent_node(struct inode *);
- void f2fs_destroy_extent_tree(struct inode *);
- bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
- void f2fs_update_extent_cache(struct dnode_of_data *);
+ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
+ bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
+ void f2fs_drop_extent_tree(struct inode *inode);
+ unsigned int f2fs_destroy_extent_node(struct inode *inode);
+ void f2fs_destroy_extent_tree(struct inode *inode);
+ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei);
+ void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
- pgoff_t, block_t, unsigned int);
- void init_extent_cache_info(struct f2fs_sb_info *);
+ pgoff_t fofs, block_t blkaddr, unsigned int len);
+ void init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init create_extent_cache(void);
void destroy_extent_cache(void);
#endif
}
-#ifndef CONFIG_F2FS_FS_ENCRYPTION
-#define fscrypt_set_d_op(i)
-#define fscrypt_get_ctx fscrypt_notsupp_get_ctx
-#define fscrypt_release_ctx fscrypt_notsupp_release_ctx
-#define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page
-#define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page
-#define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages
-#define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page
-#define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page
-#define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range
-#define fscrypt_ioctl_set_policy fscrypt_notsupp_ioctl_set_policy
-#define fscrypt_ioctl_get_policy fscrypt_notsupp_ioctl_get_policy
-#define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context
-#define fscrypt_inherit_context fscrypt_notsupp_inherit_context
-#define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info
-#define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info
-#define fscrypt_setup_filename fscrypt_notsupp_setup_filename
-#define fscrypt_free_filename fscrypt_notsupp_free_filename
-#define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size
-#define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer
-#define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer
-#define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr
-#define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk
-#endif
#endif
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
+ Opt_noinline_xattr,
Opt_inline_data,
Opt_inline_dentry,
Opt_noinline_dentry,
Opt_noinline_data,
Opt_data_flush,
Opt_mode,
+ Opt_io_size_bits,
Opt_fault_injection,
Opt_lazytime,
Opt_nolazytime,
{Opt_active_logs, "active_logs=%u"},
{Opt_disable_ext_identify, "disable_ext_identify"},
{Opt_inline_xattr, "inline_xattr"},
+ {Opt_noinline_xattr, "noinline_xattr"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
{Opt_noinline_dentry, "noinline_dentry"},
{Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"},
{Opt_mode, "mode=%s"},
+ {Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
enum {
GC_THREAD, /* struct f2fs_gc_thread */
SM_INFO, /* struct f2fs_sm_info */
+ DCC_INFO, /* struct discard_cmd_control */
NM_INFO, /* struct f2fs_nm_info */
F2FS_SBI, /* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
return (unsigned char *)sbi->gc_thread;
else if (struct_type == SM_INFO)
return (unsigned char *)SM_I(sbi);
+ else if (struct_type == DCC_INFO)
+ return (unsigned char *)SM_I(sbi)->dcc_info;
else if (struct_type == NM_INFO)
return (unsigned char *)NM_I(sbi);
else if (struct_type == F2FS_SBI)
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
- F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
+ F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
case Opt_inline_xattr:
set_opt(sbi, INLINE_XATTR);
break;
+ case Opt_noinline_xattr:
+ clear_opt(sbi, INLINE_XATTR);
+ break;
#else
case Opt_user_xattr:
f2fs_msg(sb, KERN_INFO,
f2fs_msg(sb, KERN_INFO,
"inline_xattr options not supported");
break;
+ case Opt_noinline_xattr:
+ f2fs_msg(sb, KERN_INFO,
+ "noinline_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
case Opt_acl:
}
kfree(name);
break;
+ case Opt_io_size_bits:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "Not support %d, larger than %d",
+ 1 << arg, BIO_MAX_PAGES);
+ return -EINVAL;
+ }
+ sbi->write_io_size_bits = arg;
+ break;
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, arg);
+ set_opt(sbi, FAULT_INJECTION);
#else
f2fs_msg(sb, KERN_INFO,
"FAULT_INJECTION was not selected");
return -EINVAL;
}
}
+
+ if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Should set mode=lfs with %uKB-sized IO",
+ F2FS_IO_SIZE_KB(sbi));
+ return -EINVAL;
+ }
return 0;
}
static int f2fs_drop_inode(struct inode *inode)
{
+ int ret;
/*
* This is to avoid a deadlock condition like below.
* writeback_single_inode(inode)
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
+ trace_f2fs_drop_inode(inode, 0);
return 0;
}
-
- return generic_drop_inode(inode);
+ ret = generic_drop_inode(inode);
+ trace_f2fs_drop_inode(inode, ret);
+ return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
write_checkpoint(sbi, &cpc);
}
+ /* be sure to wait for any on-going discard commands */
+ f2fs_wait_discard_bio(sbi, NULL_ADDR);
+
/* write_checkpoint can update stat information */
f2fs_destroy_stats(sbi);
kfree(sbi->raw_super);
destroy_device_list(sbi);
-
+ mempool_destroy(sbi->write_io_dummy);
destroy_percpu_info(sbi);
kfree(sbi);
}
seq_puts(seq, ",nouser_xattr");
if (test_opt(sbi, INLINE_XATTR))
seq_puts(seq, ",inline_xattr");
+ else
+ seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+ if (F2FS_IO_SIZE_BITS(sbi))
+ seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (test_opt(sbi, FAULT_INJECTION))
+ seq_puts(seq, ",fault_injection");
+ #endif
return 0;
}
sbi->active_logs = NR_CURSEG_TYPE;
set_opt(sbi, BG_GC);
+ set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
ctx, len, NULL);
}
-static int f2fs_key_prefix(struct inode *inode, u8 **key)
-{
- *key = F2FS_I_SB(inode)->key_prefix;
- return F2FS_I_SB(inode)->key_prefix_size;
-}
-
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
}
static const struct fscrypt_operations f2fs_cryptops = {
+ .key_prefix = "f2fs:",
.get_context = f2fs_get_context,
- .key_prefix = f2fs_key_prefix,
.set_context = f2fs_set_context,
.is_encrypted = f2fs_encrypted_inode,
.empty_dir = f2fs_empty_dir,
mutex_init(&sbi->wio_mutex[NODE]);
mutex_init(&sbi->wio_mutex[DATA]);
spin_lock_init(&sbi->cp_lock);
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
- F2FS_KEY_DESC_PREFIX_SIZE);
- sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
-#endif
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ unsigned int max_devices = MAX_DEVICES;
int i;
- for (i = 0; i < MAX_DEVICES; i++) {
- if (!RDEV(i).path[0])
+ /* Initialize single device information */
+ if (!RDEV(0).path[0]) {
+ if (!bdev_is_zoned(sbi->sb->s_bdev))
return 0;
+ max_devices = 1;
+ }
- if (i == 0) {
- sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
- MAX_DEVICES, GFP_KERNEL);
- if (!sbi->devs)
- return -ENOMEM;
- }
+ /*
+ * Initialize multiple devices information, or single
+ * zoned block device information.
+ */
+ sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
+ GFP_KERNEL);
+ if (!sbi->devs)
+ return -ENOMEM;
- memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
- FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
- if (i == 0) {
- FDEV(i).start_blk = 0;
- FDEV(i).end_blk = FDEV(i).start_blk +
- (FDEV(i).total_segments <<
- sbi->log_blocks_per_seg) - 1 +
- le32_to_cpu(raw_super->segment0_blkaddr);
- } else {
- FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
- FDEV(i).end_blk = FDEV(i).start_blk +
- (FDEV(i).total_segments <<
- sbi->log_blocks_per_seg) - 1;
- }
+ for (i = 0; i < max_devices; i++) {
- FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ if (i > 0 && !RDEV(i).path[0])
+ break;
+
+ if (max_devices == 1) {
+ /* Single zoned block device mount */
+ FDEV(0).bdev =
+ blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ } else {
+ /* Multi-device mount */
+ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ FDEV(i).total_segments =
+ le32_to_cpu(RDEV(i).total_segments);
+ if (i == 0) {
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ } else {
+ FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1;
+ }
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
sbi->sb->s_mode, sbi->sb->s_type);
+ }
if (IS_ERR(FDEV(i).bdev))
return PTR_ERR(FDEV(i).bdev);
"Failed to initialize F2FS blkzone information");
return -EINVAL;
}
+ if (max_devices == 1)
+ break;
f2fs_msg(sbi->sb, KERN_INFO,
"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
i, FDEV(i).path,
FDEV(i).total_segments,
FDEV(i).start_blk, FDEV(i).end_blk);
}
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
return 0;
}
if (err)
goto free_options;
+ if (F2FS_IO_SIZE(sbi) > 1) {
+ sbi->write_io_dummy =
+ mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
+ if (!sbi->write_io_dummy)
+ goto free_options;
+ }
+
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
- goto free_options;
+ goto free_io_dummy;
}
err = get_valid_checkpoint(sbi);
sbi->valid_super_block ? 1 : 2, err);
}
+ f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
+ cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
return 0;
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
+ free_io_dummy:
+ mempool_destroy(sbi->write_io_dummy);
free_options:
destroy_percpu_info(sbi);
kfree(options);