*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
struct writeback_control *wbc,
- int npages, bool is_read)
+ int npages, bool is_read,
+ enum page_type type, enum temp_type temp)
{
struct bio *bio;
bio = f2fs_bio_alloc(sbi, npages, true);
f2fs_target_device(sbi, blk_addr, bio);
- bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
- bio->bi_private = is_read ? NULL : sbi;
+ if (is_read) {
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = NULL;
+ } else {
+ bio->bi_end_io = f2fs_write_end_io;
+ bio->bi_private = sbi;
+ bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
+ }
if (wbc)
wbc_init_bio(wbc, bio);
/* Allocate a new bio */
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
- 1, is_read_io(fio->op));
+ 1, is_read_io(fio->op), fio->type, fio->temp);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
goto out_fail;
}
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
- BIO_MAX_PAGES, false);
+ BIO_MAX_PAGES, false,
+ fio->type, fio->temp);
io->fio = *fio;
}
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
int rw = iov_iter_rw(iter);
int err;
+ enum rw_hint hint = iocb->ki_hint;
+ int whint_mode = sbi->whint_mode;
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+ if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
+ iocb->ki_hint = WRITE_LIFE_NOT_SET;
+
down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
up_read(&F2FS_I(inode)->dio_rwsem[rw]);
if (rw == WRITE) {
+ if (whint_mode == WHINT_MODE_OFF)
+ iocb->ki_hint = hint;
if (err > 0) {
f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
err);
MAX_TIME,
};
+enum {
+ WHINT_MODE_OFF, /* do not pass down write hints */
+ WHINT_MODE_USER, /* try to pass down hints given by users */
+};
+
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
char *s_qf_names[MAXQUOTAS];
int s_jquota_fmt; /* Format of quota to use */
#endif
+ /* controls how write hints are passed down to the block layer */
+ int whint_mode;
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);
int rw_hint_to_seg_type(enum rw_hint hint);
+enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi, enum page_type type,
+ enum temp_type temp);
/*
* checkpoint.c
}
}
+/* This returns write hints for each segment type. These hints will be
+ * passed down to the block layer. There are mapping tables which depend
+ * on the mount option 'whint_mode'.
+ *
+ * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
+ *
+ * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
+ *
+ * User F2FS Block
+ * ---- ---- -----
+ * META WRITE_LIFE_NOT_SET
+ * HOT_NODE "
+ * WARM_NODE "
+ * COLD_NODE "
+ * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
+ * extension list " "
+ *
+ * -- buffered io
+ * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE " "
+ * WRITE_LIFE_MEDIUM " "
+ * WRITE_LIFE_LONG " "
+ *
+ * -- direct io
+ * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE " WRITE_LIFE_NONE
+ * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
+ * WRITE_LIFE_LONG " WRITE_LIFE_LONG
+ *
+ */
+
+enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+ enum page_type type, enum temp_type temp)
+{
+ if (sbi->whint_mode == WHINT_MODE_USER) {
+ if (type == DATA) {
+ switch (temp) {
+ case COLD:
+ return WRITE_LIFE_EXTREME;
+ case HOT:
+ return WRITE_LIFE_SHORT;
+ default:
+ return WRITE_LIFE_NOT_SET;
+ }
+ } else {
+ return WRITE_LIFE_NOT_SET;
+ }
+ } else {
+ return WRITE_LIFE_NOT_SET;
+ }
+}
+
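For the user-based mode described above, userspace supplies the per-file lifetime hints with fcntl(F_SET_RW_HINT) (available since Linux 4.13). A minimal sketch, assuming a hypothetical file path and fallback defines for older headers:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT	1036	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* mapped to HOT_DATA in the table above */
	int fd = open("/mnt/f2fs/hot.log", O_WRONLY | O_CREAT, 0644);	/* hypothetical path */

	if (fd < 0)
		return 1;
	/* With whint_mode=user-based, f2fs passes this hint down via bi_write_hint. */
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		perror("F_SET_RW_HINT");
	/* ... write data and close(fd) ... */
	return 0;
}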
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
if (fio->type == DATA)
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
+ .temp = HOT,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
.old_blkaddr = page->index,
int err;
fio->new_blkaddr = fio->old_blkaddr;
+ /* i/o temperature is needed for passing down write hints */
+ __get_segment_type(fio);
stat_inc_inplace_blocks(fio->sbi);
err = f2fs_submit_page_bio(fio);
Opt_jqfmt_vfsold,
Opt_jqfmt_vfsv0,
Opt_jqfmt_vfsv1,
+ Opt_whint,
Opt_err,
};
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
+ {Opt_whint, "whint_mode=%s"},
{Opt_err, NULL},
};
"quota operations not supported");
break;
#endif
+ case Opt_whint:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 10 &&
+ !strncmp(name, "user-based", 10)) {
+ sbi->whint_mode = WHINT_MODE_USER;
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "off", 3)) {
+ sbi->whint_mode = WHINT_MODE_OFF;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
return -EINVAL;
}
}
+
+ /* Do not pass down write hints if the number of active logs is less
+ * than NR_CURSEG_TYPE.
+ */
+ if (sbi->active_logs != NR_CURSEG_TYPE)
+ sbi->whint_mode = WHINT_MODE_OFF;
return 0;
}
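As a usage sketch (the device node and mount point are hypothetical): the new option is selected at mount time, and per the check above it stays effective only when all NR_CURSEG_TYPE logs are active (the default, as initialized below with sbi->active_logs = NR_CURSEG_TYPE):

#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	/* "whint_mode=user-based" is parsed by the Opt_whint case above; it is
	 * reset to WHINT_MODE_OFF unless active_logs equals NR_CURSEG_TYPE. */
	if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
		  "whint_mode=user-based") < 0) {
		perror("mount f2fs");
		return 1;
	}
	return 0;
}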
seq_puts(seq, ",prjquota");
#endif
f2fs_show_quota_options(seq, sbi->sb);
+ if (sbi->whint_mode == WHINT_MODE_USER)
+ seq_printf(seq, ",whint_mode=%s", "user-based");
return 0;
}
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ sbi->whint_mode = WHINT_MODE_OFF;
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+ int old_whint_mode = sbi->whint_mode;
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info ffi = sbi->fault_info;
#endif
need_stop_gc = true;
}
- if (*flags & SB_RDONLY) {
+ if (*flags & SB_RDONLY || sbi->whint_mode != old_whint_mode) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
sbi->s_qf_names[i] = s_qf_names[i];
}
#endif
+ sbi->whint_mode = old_whint_mode;
sbi->mount_opt = org_mount_opt;
sbi->active_logs = active_logs;
sb->s_flags = old_sb_flags;