// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
30 #define NUM_PREALLOC_POST_READ_CTXS 128
32 static struct kmem_cache *bio_post_read_ctx_cache;
33 static struct kmem_cache *bio_entry_slab;
34 static mempool_t *bio_post_read_ctx_pool;
35 static struct bio_set f2fs_bioset;
37 #define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}
void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}
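/*
 * __is_cp_guaranteed() below decides the writeback accounting class of a
 * page: pages whose writeback is guaranteed to complete by checkpoint
 * (meta/node inodes, directories, atomic or quota file data) are counted
 * as F2FS_WB_CP_DATA, everything else as F2FS_WB_DATA.
 */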
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	if (f2fs_is_compressed_page(page))
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			is_cold_data(page) || IS_ATOMIC_WRITTEN_PAGE(page))
		return true;
	return false;
}
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}
/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};
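/*
 * The steps above are bit flags, so one read bio can carry any
 * combination; e.g. a cluster of an encrypted verity file ends up with
 * STEP_DECRYPT | STEP_VERITY in ->enabled_steps.
 */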
struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
};
static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}
/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}
/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page));
		else
			all_compressed = false;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}
static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}
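/*
 * Summary of the read completion pipeline assembled above:
 *
 *   f2fs_read_end_io()
 *     -> f2fs_post_read_work() on sbi->post_read_wq (decrypt/decompress)
 *       -> f2fs_verify_and_finish_bio()
 *         -> f2fs_verify_bio() on the fs-verity workqueue (STEP_VERITY)
 *           -> f2fs_finish_read_bio() (update and unlock pages)
 *
 * Bios with no post-read steps skip straight to f2fs_finish_read_bio().
 */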
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			zero_user_segment(page, 0, PAGE_SIZE);
			SetPagePrivate(page);
			set_page_private(page, DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file().
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
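	/*
	 * Worked example of the padding above: with F2FS_IO_SIZE() == 8
	 * blocks, a write bio currently holding 5 blocks gets 3 dummy
	 * pages appended, so the LFS write stream stays aligned to the
	 * 8-block IO unit.
	 */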
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
480 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
481 struct bio *bio, enum page_type type)
483 __submit_bio(sbi, bio, type);
static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}
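/*
 * Worked example for __attach_io_flag(): with NR_TEMP_TYPE == 3 and
 * sbi->data_io_flag == 0x09 (0b001001), fua_flag == 0b001 and
 * meta_flag == 0b001, so hot data IO gets both REQ_FUA and REQ_META
 * while warm and cold data IO is submitted without extra flags.
 */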
514 static void __submit_merged_bio(struct f2fs_bio_info *io)
516 struct f2fs_io_info *fio = &io->fio;
521 __attach_io_flag(fio);
522 bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
524 if (is_read_io(fio->op))
525 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
527 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
529 __submit_bio(io->sbi, io->bio, fio->type);
533 static bool __has_merged_page(struct bio *bio, struct inode *inode,
534 struct page *page, nid_t ino)
536 struct bio_vec *bvec;
537 struct bvec_iter_all iter_all;
542 if (!inode && !page && !ino)
545 bio_for_each_segment_all(bvec, bio, iter_all) {
546 struct page *target = bvec->bv_page;
548 if (fscrypt_is_bounce_page(target)) {
549 target = fscrypt_pagecache_page(target);
553 if (f2fs_is_compressed_page(target)) {
554 target = f2fs_compress_control_page(target);
559 if (inode && inode == target->mapping->host)
561 if (page && page == target)
563 if (ino && ino == ino_of_node(target))
570 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
571 enum page_type type, enum temp_type temp)
573 enum page_type btype = PAGE_TYPE_OF_BIO(type);
574 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
576 down_write(&io->io_rwsem);
578 /* change META to META_FLUSH in the checkpoint procedure */
579 if (type >= META_FLUSH) {
580 io->fio.type = META_FLUSH;
581 io->fio.op = REQ_OP_WRITE;
582 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
583 if (!test_opt(sbi, NOBARRIER))
584 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
586 __submit_merged_bio(io);
587 up_write(&io->io_rwsem);
590 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
591 struct inode *inode, struct page *page,
592 nid_t ino, enum page_type type, bool force)
597 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
599 enum page_type btype = PAGE_TYPE_OF_BIO(type);
600 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
602 down_read(&io->io_rwsem);
603 ret = __has_merged_page(io->bio, inode, page, ino);
604 up_read(&io->io_rwsem);
607 __f2fs_submit_merged_write(sbi, type, temp);
609 /* TODO: use HOT temp only for meta pages now. */
615 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
617 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
620 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
621 struct inode *inode, struct page *page,
622 nid_t ino, enum page_type type)
624 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
627 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
629 f2fs_submit_merged_write(sbi, DATA);
630 f2fs_submit_merged_write(sbi, NODE);
631 f2fs_submit_merged_write(sbi, META);
/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
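/*
 * Illustrative caller sketch (not compiled here; the meta-page readers in
 * checkpoint.c follow this pattern): read one block synchronously through
 * f2fs_submit_page_bio().
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = sbi,
 *		.type = META,
 *		.op = REQ_OP_READ,
 *		.op_flags = REQ_SYNC,
 *		.old_blkaddr = blkaddr,
 *		.new_blkaddr = blkaddr,
 *		.page = page,
 *		.encrypted_page = NULL,
 *	};
 *	err = f2fs_submit_page_bio(&fio);
 */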
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}
686 static bool io_type_is_mergeable(struct f2fs_bio_info *io,
687 struct f2fs_io_info *fio)
689 if (io->fio.op != fio->op)
691 return io->fio.op_flags == fio->op_flags;
694 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
695 struct f2fs_bio_info *io,
696 struct f2fs_io_info *fio,
697 block_t last_blkaddr,
700 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
701 unsigned int filled_blocks =
702 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
703 unsigned int io_size = F2FS_IO_SIZE(sbi);
704 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
		/* the IO in this bio is aligned, but not enough vector space is left */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
710 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
712 return io_type_is_mergeable(io, fio);
715 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
716 struct page *page, enum temp_type temp)
718 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
719 struct bio_entry *be;
721 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
725 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
728 down_write(&io->bio_list_lock);
729 list_add_tail(&be->list, &io->bio_list);
730 up_write(&io->bio_list_lock);
733 static void del_bio_entry(struct bio_entry *be)
736 kmem_cache_free(bio_entry_slab, be);
739 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
742 struct f2fs_sb_info *sbi = fio->sbi;
747 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
748 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
749 struct list_head *head = &io->bio_list;
750 struct bio_entry *be;
752 down_write(&io->bio_list_lock);
753 list_for_each_entry(be, head, list) {
759 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
762 if (f2fs_crypt_mergeable_bio(*bio,
763 fio->page->mapping->host,
764 fio->page->index, fio) &&
765 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
771 /* page can't be merged into bio; submit the bio */
773 __submit_bio(sbi, *bio, DATA);
776 up_write(&io->bio_list_lock);
787 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
788 struct bio **bio, struct page *page)
792 struct bio *target = bio ? *bio : NULL;
794 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
795 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
796 struct list_head *head = &io->bio_list;
797 struct bio_entry *be;
799 if (list_empty(head))
802 down_read(&io->bio_list_lock);
803 list_for_each_entry(be, head, list) {
805 found = (target == be->bio);
807 found = __has_merged_page(be->bio, NULL,
812 up_read(&io->bio_list_lock);
819 down_write(&io->bio_list_lock);
820 list_for_each_entry(be, head, list) {
822 found = (target == be->bio);
824 found = __has_merged_page(be->bio, NULL,
832 up_write(&io->bio_list_lock);
836 __submit_bio(sbi, target, DATA);
843 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
845 struct bio *bio = *fio->bio;
846 struct page *page = fio->encrypted_page ?
847 fio->encrypted_page : fio->page;
849 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
850 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
851 return -EFSCORRUPTED;
853 trace_f2fs_submit_page_bio(page, fio);
855 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
857 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
860 bio = __bio_alloc(fio, BIO_MAX_VECS);
861 __attach_io_flag(fio);
862 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
863 fio->page->index, fio, GFP_NOIO);
864 bio_set_op_attrs(bio, fio->op, fio->op_flags);
866 add_bio_entry(fio->sbi, bio, page, fio->temp);
868 if (add_ipu_page(fio, &bio, page))
873 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
875 inc_page_count(fio->sbi, WB_DATA_TYPE(page));
877 *fio->last_block = fio->new_blkaddr;
883 void f2fs_submit_page_write(struct f2fs_io_info *fio)
885 struct f2fs_sb_info *sbi = fio->sbi;
886 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
887 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
888 struct page *bio_page;
890 f2fs_bug_on(sbi, is_read_io(fio->op));
892 down_write(&io->io_rwsem);
895 spin_lock(&io->io_lock);
896 if (list_empty(&io->io_list)) {
897 spin_unlock(&io->io_lock);
900 fio = list_first_entry(&io->io_list,
901 struct f2fs_io_info, list);
902 list_del(&fio->list);
903 spin_unlock(&io->io_lock);
906 verify_fio_blkaddr(fio);
908 if (fio->encrypted_page)
909 bio_page = fio->encrypted_page;
910 else if (fio->compressed_page)
911 bio_page = fio->compressed_page;
913 bio_page = fio->page;
915 /* set submitted = true as a return value */
916 fio->submitted = true;
918 inc_page_count(sbi, WB_DATA_TYPE(bio_page));
921 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
923 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
924 bio_page->index, fio)))
925 __submit_merged_bio(io);
927 if (io->bio == NULL) {
928 if (F2FS_IO_ALIGNED(sbi) &&
929 (fio->type == DATA || fio->type == NODE) &&
930 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
931 dec_page_count(sbi, WB_DATA_TYPE(bio_page));
935 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
936 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
937 bio_page->index, fio, GFP_NOIO);
941 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
942 __submit_merged_bio(io);
947 wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
949 io->last_block_in_bio = fio->new_blkaddr;
951 trace_f2fs_submit_page_write(fio->page, fio);
956 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
957 !f2fs_is_checkpoint_ready(sbi))
958 __submit_merged_bio(io);
959 up_write(&io->io_rwsem);
962 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
963 unsigned nr_pages, unsigned op_flag,
964 pgoff_t first_idx, bool for_write)
966 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
968 struct bio_post_read_ctx *ctx;
969 unsigned int post_read_steps = 0;
971 bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
972 bio_max_segs(nr_pages), &f2fs_bioset);
974 return ERR_PTR(-ENOMEM);
976 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
978 f2fs_target_device(sbi, blkaddr, bio);
979 bio->bi_end_io = f2fs_read_end_io;
980 bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
982 if (fscrypt_inode_uses_fs_layer_crypto(inode))
983 post_read_steps |= STEP_DECRYPT;
985 if (f2fs_need_verity(inode, first_idx))
986 post_read_steps |= STEP_VERITY;
	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */
995 if (post_read_steps || f2fs_compressed_file(inode)) {
996 /* Due to the mempool, this never fails. */
997 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1000 ctx->enabled_steps = post_read_steps;
1001 bio->bi_private = ctx;
/* This can handle encryption stuff */
1008 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1009 block_t blkaddr, int op_flags, bool for_write)
1011 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1014 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1015 page->index, for_write);
1017 return PTR_ERR(bio);
1019 /* wait for GCed page writeback via META_MAPPING */
1020 f2fs_wait_on_block_writeback(inode, blkaddr);
1022 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1026 ClearPageError(page);
1027 inc_page_count(sbi, F2FS_RD_DATA);
1028 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1029 __submit_bio(sbi, bio, DATA);
1033 static void __set_data_blkaddr(struct dnode_of_data *dn)
1035 struct f2fs_node *rn = F2FS_NODE(dn->node_page);
1039 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
1040 base = get_extra_isize(dn->inode);
1042 /* Get physical address of data block */
1043 addr_array = blkaddr_in_node(rn);
1044 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
1053 void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
1055 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1056 __set_data_blkaddr(dn);
1057 if (set_page_dirty(dn->node_page))
1058 dn->node_changed = true;
1061 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1063 dn->data_blkaddr = blkaddr;
1064 f2fs_set_data_blkaddr(dn);
1065 f2fs_update_extent_cache(dn);
1068 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
1069 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1071 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1077 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1079 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1082 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1083 dn->ofs_in_node, count);
1085 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1087 for (; count > 0; dn->ofs_in_node++) {
1088 block_t blkaddr = f2fs_data_blkaddr(dn);
1089 if (blkaddr == NULL_ADDR) {
1090 dn->data_blkaddr = NEW_ADDR;
1091 __set_data_blkaddr(dn);
1096 if (set_page_dirty(dn->node_page))
1097 dn->node_changed = true;
/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
1141 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1142 int op_flags, bool for_write)
1144 struct address_space *mapping = inode->i_mapping;
1145 struct dnode_of_data dn;
1147 struct extent_info ei = {0,0,0};
1150 page = f2fs_grab_cache_page(mapping, index, for_write);
1152 return ERR_PTR(-ENOMEM);
1154 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1155 dn.data_blkaddr = ei.blk + index - ei.fofs;
1156 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1157 DATA_GENERIC_ENHANCE_READ)) {
1158 err = -EFSCORRUPTED;
1164 set_new_dnode(&dn, inode, NULL, NULL, 0);
1165 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1168 f2fs_put_dnode(&dn);
1170 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1174 if (dn.data_blkaddr != NEW_ADDR &&
1175 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1177 DATA_GENERIC_ENHANCE)) {
1178 err = -EFSCORRUPTED;
1182 if (PageUptodate(page)) {
	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
1194 if (dn.data_blkaddr == NEW_ADDR) {
1195 zero_user_segment(page, 0, PAGE_SIZE);
1196 if (!PageUptodate(page))
1197 SetPageUptodate(page);
1202 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1203 op_flags, for_write);
1209 f2fs_put_page(page, 1);
1210 return ERR_PTR(err);
1213 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
1215 struct address_space *mapping = inode->i_mapping;
1218 page = find_get_page(mapping, index);
1219 if (page && PageUptodate(page))
1221 f2fs_put_page(page, 0);
1223 page = f2fs_get_read_data_page(inode, index, 0, false);
1227 if (PageUptodate(page))
1230 wait_on_page_locked(page);
1231 if (unlikely(!PageUptodate(page))) {
1232 f2fs_put_page(page, 0);
1233 return ERR_PTR(-EIO);
/*
 * If it tries to access a hole, return an error.
 * The callers in dir.c and GC need to be able to tell
 * whether this page exists or not.
 */
1243 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1246 struct address_space *mapping = inode->i_mapping;
1249 page = f2fs_get_read_data_page(inode, index, 0, for_write);
1253 /* wait for read completion */
1255 if (unlikely(page->mapping != mapping)) {
1256 f2fs_put_page(page, 1);
1259 if (unlikely(!PageUptodate(page))) {
1260 f2fs_put_page(page, 1);
1261 return ERR_PTR(-EIO);
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
1275 struct page *f2fs_get_new_data_page(struct inode *inode,
1276 struct page *ipage, pgoff_t index, bool new_i_size)
1278 struct address_space *mapping = inode->i_mapping;
1280 struct dnode_of_data dn;
1283 page = f2fs_grab_cache_page(mapping, index, true);
1286 * before exiting, we should make sure ipage will be released
1287 * if any error occur.
1289 f2fs_put_page(ipage, 1);
1290 return ERR_PTR(-ENOMEM);
1293 set_new_dnode(&dn, inode, ipage, NULL, 0);
1294 err = f2fs_reserve_block(&dn, index);
1296 f2fs_put_page(page, 1);
1297 return ERR_PTR(err);
1300 f2fs_put_dnode(&dn);
1302 if (PageUptodate(page))
1305 if (dn.data_blkaddr == NEW_ADDR) {
1306 zero_user_segment(page, 0, PAGE_SIZE);
1307 if (!PageUptodate(page))
1308 SetPageUptodate(page);
1310 f2fs_put_page(page, 1);
1312 /* if ipage exists, blkaddr should be NEW_ADDR */
1313 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1314 page = f2fs_get_lock_data_page(inode, index, true);
1319 if (new_i_size && i_size_read(inode) <
1320 ((loff_t)(index + 1) << PAGE_SHIFT))
1321 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1325 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1327 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1328 struct f2fs_summary sum;
1329 struct node_info ni;
1330 block_t old_blkaddr;
1334 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1337 err = f2fs_get_node_info(sbi, dn->nid, &ni);
1341 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1342 if (dn->data_blkaddr != NULL_ADDR)
1345 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1349 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1350 old_blkaddr = dn->data_blkaddr;
1351 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1352 &sum, seg_type, NULL);
1353 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1354 invalidate_mapping_pages(META_MAPPING(sbi),
1355 old_blkaddr, old_blkaddr);
1356 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
1365 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
1367 struct inode *inode = file_inode(iocb->ki_filp);
1368 struct f2fs_map_blocks map;
1371 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
1373 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
1374 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1375 if (map.m_len > map.m_lblk)
1376 map.m_len -= map.m_lblk;
1380 map.m_next_pgofs = NULL;
1381 map.m_next_extent = NULL;
1382 map.m_seg_type = NO_CHECK_TYPE;
1383 map.m_may_create = true;
1386 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
1387 flag = f2fs_force_buffered_io(inode, iocb, from) ?
1388 F2FS_GET_BLOCK_PRE_AIO :
1389 F2FS_GET_BLOCK_PRE_DIO;
1392 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
1393 err = f2fs_convert_inline_inode(inode);
1397 if (f2fs_has_inline_data(inode))
1400 flag = F2FS_GET_BLOCK_PRE_AIO;
1403 err = f2fs_map_blocks(inode, &map, 1, flag);
1404 if (map.m_len > 0 && err == -ENOSPC) {
1406 set_inode_flag(inode, FI_NO_PREALLOC);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}
/*
 * f2fs_map_blocks() tries to find or build a mapping from contiguous logical
 * blocks to physical blocks, and returns such info via the f2fs_map_blocks
 * structure.
 */
1432 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1433 int create, int flag)
1435 unsigned int maxblocks = map->m_len;
1436 struct dnode_of_data dn;
1437 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1438 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1439 pgoff_t pgofs, end_offset, end;
1440 int err = 0, ofs = 1;
1441 unsigned int ofs_in_node, last_ofs_in_node;
1443 struct extent_info ei = {0,0,0};
1445 unsigned int start_pgofs;
1453 /* it only supports block size == page size */
1454 pgofs = (pgoff_t)map->m_lblk;
1455 end = pgofs + maxblocks;
1457 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1458 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1462 map->m_pblk = ei.blk + pgofs - ei.fofs;
1463 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1464 map->m_flags = F2FS_MAP_MAPPED;
1465 if (map->m_next_extent)
1466 *map->m_next_extent = pgofs + map->m_len;
		/* for hardware encryption, but also to avoid potential issues in the future */
1469 if (flag == F2FS_GET_BLOCK_DIO)
1470 f2fs_wait_on_block_writeback_range(inode,
1471 map->m_pblk, map->m_len);
1476 if (map->m_may_create)
1477 f2fs_do_map_lock(sbi, flag, true);
1479 /* When reading holes, we need its node page */
1480 set_new_dnode(&dn, inode, NULL, NULL, 0);
1481 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1483 if (flag == F2FS_GET_BLOCK_BMAP)
1485 if (err == -ENOENT) {
1487 if (map->m_next_pgofs)
1488 *map->m_next_pgofs =
1489 f2fs_get_next_page_offset(&dn, pgofs);
1490 if (map->m_next_extent)
1491 *map->m_next_extent =
1492 f2fs_get_next_page_offset(&dn, pgofs);
1497 start_pgofs = pgofs;
1499 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1500 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1503 blkaddr = f2fs_data_blkaddr(&dn);
1505 if (__is_valid_data_blkaddr(blkaddr) &&
1506 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1507 err = -EFSCORRUPTED;
1511 if (__is_valid_data_blkaddr(blkaddr)) {
			/* use out-of-place update for direct IO under LFS mode */
1513 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1514 map->m_may_create) {
1515 err = __allocate_data_block(&dn, map->m_seg_type);
1518 blkaddr = dn.data_blkaddr;
1519 set_inode_flag(inode, FI_APPEND_WRITE);
1523 if (unlikely(f2fs_cp_error(sbi))) {
1527 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1528 if (blkaddr == NULL_ADDR) {
1530 last_ofs_in_node = dn.ofs_in_node;
1533 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1534 flag != F2FS_GET_BLOCK_DIO);
1535 err = __allocate_data_block(&dn,
1538 set_inode_flag(inode, FI_APPEND_WRITE);
1542 map->m_flags |= F2FS_MAP_NEW;
1543 blkaddr = dn.data_blkaddr;
1545 if (flag == F2FS_GET_BLOCK_BMAP) {
1549 if (flag == F2FS_GET_BLOCK_PRECACHE)
1551 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1552 blkaddr == NULL_ADDR) {
1553 if (map->m_next_pgofs)
1554 *map->m_next_pgofs = pgofs + 1;
1557 if (flag != F2FS_GET_BLOCK_FIEMAP) {
1558 /* for defragment case */
1559 if (map->m_next_pgofs)
1560 *map->m_next_pgofs = pgofs + 1;
1566 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1569 if (map->m_len == 0) {
1570 /* preallocated unwritten block should be mapped for fiemap. */
1571 if (blkaddr == NEW_ADDR)
1572 map->m_flags |= F2FS_MAP_UNWRITTEN;
1573 map->m_flags |= F2FS_MAP_MAPPED;
1575 map->m_pblk = blkaddr;
1577 } else if ((map->m_pblk != NEW_ADDR &&
1578 blkaddr == (map->m_pblk + ofs)) ||
1579 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1580 flag == F2FS_GET_BLOCK_PRE_DIO) {
1591 /* preallocate blocks in batch for one dnode page */
1592 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1593 (pgofs == end || dn.ofs_in_node == end_offset)) {
1595 dn.ofs_in_node = ofs_in_node;
1596 err = f2fs_reserve_new_blocks(&dn, prealloc);
1600 map->m_len += dn.ofs_in_node - ofs_in_node;
1601 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1605 dn.ofs_in_node = end_offset;
1610 else if (dn.ofs_in_node < end_offset)
1613 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1614 if (map->m_flags & F2FS_MAP_MAPPED) {
1615 unsigned int ofs = start_pgofs - map->m_lblk;
1617 f2fs_update_extent_cache_range(&dn,
1618 start_pgofs, map->m_pblk + ofs,
1623 f2fs_put_dnode(&dn);
1625 if (map->m_may_create) {
1626 f2fs_do_map_lock(sbi, flag, false);
1627 f2fs_balance_fs(sbi, dn.node_changed);
	/* for hardware encryption, but also to avoid potential issues in the future */
1634 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1635 f2fs_wait_on_block_writeback_range(inode,
1636 map->m_pblk, map->m_len);
1638 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1639 if (map->m_flags & F2FS_MAP_MAPPED) {
1640 unsigned int ofs = start_pgofs - map->m_lblk;
1642 f2fs_update_extent_cache_range(&dn,
1643 start_pgofs, map->m_pblk + ofs,
1646 if (map->m_next_extent)
1647 *map->m_next_extent = pgofs + 1;
1649 f2fs_put_dnode(&dn);
1651 if (map->m_may_create) {
1652 f2fs_do_map_lock(sbi, flag, false);
1653 f2fs_balance_fs(sbi, dn.node_changed);
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
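/*
 * Illustrative lookup-only usage (f2fs_overwrite_io() below is the real
 * thing): zero the map, set the logical range, leave m_may_create false,
 * and call with create == 0:
 *
 *	memset(&map, 0, sizeof(map));
 *	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
 *	map.m_len = 1;
 *	map.m_seg_type = NO_CHECK_TYPE;
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 */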
1660 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1662 struct f2fs_map_blocks map;
1666 if (pos + len > i_size_read(inode))
1669 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1670 map.m_next_pgofs = NULL;
1671 map.m_next_extent = NULL;
1672 map.m_seg_type = NO_CHECK_TYPE;
1673 map.m_may_create = false;
1674 last_lblk = F2FS_BLK_ALIGN(pos + len);
1676 while (map.m_lblk < last_lblk) {
1677 map.m_len = last_lblk - map.m_lblk;
1678 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1679 if (err || map.m_len == 0)
1681 map.m_lblk += map.m_len;
1686 static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1688 return (bytes >> inode->i_blkbits);
1691 static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1693 return (blks << inode->i_blkbits);
1696 static int __get_data_block(struct inode *inode, sector_t iblock,
1697 struct buffer_head *bh, int create, int flag,
1698 pgoff_t *next_pgofs, int seg_type, bool may_write)
1700 struct f2fs_map_blocks map;
1703 map.m_lblk = iblock;
1704 map.m_len = bytes_to_blks(inode, bh->b_size);
1705 map.m_next_pgofs = next_pgofs;
1706 map.m_next_extent = NULL;
1707 map.m_seg_type = seg_type;
1708 map.m_may_create = may_write;
1710 err = f2fs_map_blocks(inode, &map, create, flag);
1712 map_bh(bh, inode->i_sb, map.m_pblk);
1713 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1714 bh->b_size = blks_to_bytes(inode, map.m_len);
1719 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1720 struct buffer_head *bh_result, int create)
1722 return __get_data_block(inode, iblock, bh_result, create,
1723 F2FS_GET_BLOCK_DIO, NULL,
1724 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1725 IS_SWAPFILE(inode) ? false : true);
1728 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1729 struct buffer_head *bh_result, int create)
1731 return __get_data_block(inode, iblock, bh_result, create,
1732 F2FS_GET_BLOCK_DIO, NULL,
1733 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1737 static int f2fs_xattr_fiemap(struct inode *inode,
1738 struct fiemap_extent_info *fieinfo)
1740 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1742 struct node_info ni;
1743 __u64 phys = 0, len;
1745 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1748 if (f2fs_has_inline_xattr(inode)) {
1751 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1752 inode->i_ino, false);
1756 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1758 f2fs_put_page(page, 1);
1762 phys = blks_to_bytes(inode, ni.blk_addr);
1763 offset = offsetof(struct f2fs_inode, i_addr) +
1764 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1765 get_inline_xattr_addrs(inode));
1768 len = inline_xattr_size(inode);
1770 f2fs_put_page(page, 1);
1772 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1775 flags |= FIEMAP_EXTENT_LAST;
1777 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1778 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1779 if (err || err == 1)
1784 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1788 err = f2fs_get_node_info(sbi, xnid, &ni);
1790 f2fs_put_page(page, 1);
1794 phys = blks_to_bytes(inode, ni.blk_addr);
1795 len = inode->i_sb->s_blocksize;
1797 f2fs_put_page(page, 1);
1799 flags = FIEMAP_EXTENT_LAST;
1803 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1804 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1807 return (err < 0 ? err : 0);
1810 static loff_t max_inode_blocks(struct inode *inode)
1812 loff_t result = ADDRS_PER_INODE(inode);
1813 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1815 /* two direct node blocks */
1816 result += (leaf_count * 2);
1818 /* two indirect node blocks */
1819 leaf_count *= NIDS_PER_BLOCK;
1820 result += (leaf_count * 2);
	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
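/*
 * Worked example, assuming the default 4KB-block layout where
 * ADDRS_PER_INODE() is 923 and both ADDRS_PER_BLOCK() and NIDS_PER_BLOCK
 * are 1018:
 *
 *	923 + 2*1018 + 2*1018^2 + 1018^3 = 1,057,053,439 blocks,
 *
 * i.e. roughly 3.94TB of addressable data per inode.
 */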
1829 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1832 struct f2fs_map_blocks map;
1833 sector_t start_blk, last_blk;
1835 u64 logical = 0, phys = 0, size = 0;
1838 bool compr_cluster = false;
1839 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1841 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1842 ret = f2fs_precache_extents(inode);
1847 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1853 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1854 ret = f2fs_xattr_fiemap(inode, fieinfo);
1858 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1859 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1864 if (bytes_to_blks(inode, len) == 0)
1865 len = blks_to_bytes(inode, 1);
1867 start_blk = bytes_to_blks(inode, start);
1868 last_blk = bytes_to_blks(inode, start + len - 1);
1871 memset(&map, 0, sizeof(map));
1872 map.m_lblk = start_blk;
1873 map.m_len = bytes_to_blks(inode, len);
1874 map.m_next_pgofs = &next_pgofs;
1875 map.m_seg_type = NO_CHECK_TYPE;
1878 map.m_len = cluster_size - 1;
1880 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
1885 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1886 start_blk = next_pgofs;
1888 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1889 max_inode_blocks(inode)))
1892 flags |= FIEMAP_EXTENT_LAST;
1896 flags |= FIEMAP_EXTENT_MERGED;
1897 if (IS_ENCRYPTED(inode))
1898 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1900 ret = fiemap_fill_next_extent(fieinfo, logical,
1902 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1908 if (start_blk > last_blk)
1911 if (compr_cluster) {
1912 compr_cluster = false;
1915 logical = blks_to_bytes(inode, start_blk - 1);
1916 phys = blks_to_bytes(inode, map.m_pblk);
1917 size = blks_to_bytes(inode, cluster_size);
1919 flags |= FIEMAP_EXTENT_ENCODED;
1921 start_blk += cluster_size - 1;
1923 if (start_blk > last_blk)
1929 if (map.m_pblk == COMPRESS_ADDR) {
1930 compr_cluster = true;
1935 logical = blks_to_bytes(inode, start_blk);
1936 phys = blks_to_bytes(inode, map.m_pblk);
1937 size = blks_to_bytes(inode, map.m_len);
1939 if (map.m_flags & F2FS_MAP_UNWRITTEN)
1940 flags = FIEMAP_EXTENT_UNWRITTEN;
1942 start_blk += bytes_to_blks(inode, size);
1946 if (fatal_signal_pending(current))
	inode_unlock(inode);
	return ret;
}
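/*
 * f2fs_readpage_limit() below exists because fs-verity stores its Merkle
 * tree beyond i_size; while verity is enabled (or still being built),
 * reads past EOF up to s_maxbytes must be allowed so the verity metadata
 * pages can be fetched through the normal read path.
 */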
1958 static inline loff_t f2fs_readpage_limit(struct inode *inode)
1960 if (IS_ENABLED(CONFIG_FS_VERITY) &&
1961 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
1962 return inode->i_sb->s_maxbytes;
1964 return i_size_read(inode);
1967 static int f2fs_read_single_page(struct inode *inode, struct page *page,
1969 struct f2fs_map_blocks *map,
1970 struct bio **bio_ret,
1971 sector_t *last_block_in_bio,
1974 struct bio *bio = *bio_ret;
1975 const unsigned blocksize = blks_to_bytes(inode, 1);
1976 sector_t block_in_file;
1977 sector_t last_block;
1978 sector_t last_block_in_file;
1982 block_in_file = (sector_t)page_index(page);
1983 last_block = block_in_file + nr_pages;
1984 last_block_in_file = bytes_to_blks(inode,
1985 f2fs_readpage_limit(inode) + blocksize - 1);
1986 if (last_block > last_block_in_file)
1987 last_block = last_block_in_file;
1989 /* just zeroing out page which is beyond EOF */
1990 if (block_in_file >= last_block)
	/*
	 * Map blocks using the previous result first.
	 */
1995 if ((map->m_flags & F2FS_MAP_MAPPED) &&
1996 block_in_file > map->m_lblk &&
1997 block_in_file < (map->m_lblk + map->m_len))
	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
2004 map->m_lblk = block_in_file;
2005 map->m_len = last_block - block_in_file;
2007 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2011 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2012 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2013 SetPageMappedToDisk(page);
2015 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2016 !cleancache_get_page(page))) {
2017 SetPageUptodate(page);
2021 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2022 DATA_GENERIC_ENHANCE_READ)) {
2023 ret = -EFSCORRUPTED;
2028 zero_user_segment(page, 0, PAGE_SIZE);
2029 if (f2fs_need_verity(inode, page->index) &&
2030 !fsverity_verify_page(page)) {
2034 if (!PageUptodate(page))
2035 SetPageUptodate(page);
	/*
	 * This page will go to BIO. Do we need to send this
	 * BIO off first?
	 */
2044 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2045 *last_block_in_bio, block_nr) ||
2046 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2048 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2052 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2053 is_readahead ? REQ_RAHEAD : 0, page->index,
	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
2066 f2fs_wait_on_block_writeback(inode, block_nr);
2068 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2069 goto submit_and_realloc;
2071 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2072 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2073 ClearPageError(page);
2074 *last_block_in_bio = block_nr;
2078 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2087 #ifdef CONFIG_F2FS_FS_COMPRESSION
2088 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2089 unsigned nr_pages, sector_t *last_block_in_bio,
2090 bool is_readahead, bool for_write)
2092 struct dnode_of_data dn;
2093 struct inode *inode = cc->inode;
2094 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2095 struct bio *bio = *bio_ret;
2096 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2097 sector_t last_block_in_file;
2098 const unsigned blocksize = blks_to_bytes(inode, 1);
2099 struct decompress_io_ctx *dic = NULL;
2103 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2105 last_block_in_file = bytes_to_blks(inode,
2106 f2fs_readpage_limit(inode) + blocksize - 1);
2108 /* get rid of pages beyond EOF */
2109 for (i = 0; i < cc->cluster_size; i++) {
2110 struct page *page = cc->rpages[i];
2114 if ((sector_t)page->index >= last_block_in_file) {
2115 zero_user_segment(page, 0, PAGE_SIZE);
2116 if (!PageUptodate(page))
2117 SetPageUptodate(page);
2118 } else if (!PageUptodate(page)) {
2122 cc->rpages[i] = NULL;
2126 /* we are done since all pages are beyond EOF */
2127 if (f2fs_cluster_is_empty(cc))
2130 set_new_dnode(&dn, inode, NULL, NULL, 0);
2131 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2135 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2137 for (i = 1; i < cc->cluster_size; i++) {
2140 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2141 dn.ofs_in_node + i);
2143 if (!__is_valid_data_blkaddr(blkaddr))
2146 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2153 /* nothing to decompress */
2154 if (cc->nr_cpages == 0) {
2159 dic = f2fs_alloc_dic(cc);
2165 for (i = 0; i < dic->nr_cpages; i++) {
2166 struct page *page = dic->cpages[i];
2168 struct bio_post_read_ctx *ctx;
2170 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2171 dn.ofs_in_node + i + 1);
2173 if (bio && (!page_is_mergeable(sbi, bio,
2174 *last_block_in_bio, blkaddr) ||
2175 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2177 __submit_bio(sbi, bio, DATA);
2182 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2183 is_readahead ? REQ_RAHEAD : 0,
2184 page->index, for_write);
2187 f2fs_decompress_end_io(dic, ret);
2188 f2fs_put_dnode(&dn);
2194 f2fs_wait_on_block_writeback(inode, blkaddr);
2196 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2197 goto submit_and_realloc;
2199 ctx = bio->bi_private;
2200 ctx->enabled_steps |= STEP_DECOMPRESS;
2201 refcount_inc(&dic->refcnt);
2203 inc_page_count(sbi, F2FS_RD_DATA);
2204 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2205 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2206 ClearPageError(page);
2207 *last_block_in_bio = blkaddr;
2210 f2fs_put_dnode(&dn);
2216 f2fs_put_dnode(&dn);
2218 for (i = 0; i < cc->cluster_size; i++) {
2219 if (cc->rpages[i]) {
2220 ClearPageUptodate(cc->rpages[i]);
2221 ClearPageError(cc->rpages[i]);
2222 unlock_page(cc->rpages[i]);
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
2234 static int f2fs_mpage_readpages(struct inode *inode,
2235 struct readahead_control *rac, struct page *page)
2237 struct bio *bio = NULL;
2238 sector_t last_block_in_bio = 0;
2239 struct f2fs_map_blocks map;
2240 #ifdef CONFIG_F2FS_FS_COMPRESSION
2241 struct compress_ctx cc = {
2243 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2244 .cluster_size = F2FS_I(inode)->i_cluster_size,
2245 .cluster_idx = NULL_CLUSTER,
2252 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2253 unsigned max_nr_pages = nr_pages;
2260 map.m_next_pgofs = NULL;
2261 map.m_next_extent = NULL;
2262 map.m_seg_type = NO_CHECK_TYPE;
2263 map.m_may_create = false;
2265 for (; nr_pages; nr_pages--) {
2267 page = readahead_page(rac);
2268 prefetchw(&page->flags);
2271 #ifdef CONFIG_F2FS_FS_COMPRESSION
2272 if (f2fs_compressed_file(inode)) {
			/* there are remaining compressed pages; submit them */
2274 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2275 ret = f2fs_read_multi_pages(&cc, &bio,
2278 rac != NULL, false);
2279 f2fs_destroy_compress_ctx(&cc);
2281 goto set_error_page;
2283 ret = f2fs_is_compressed_cluster(inode, page->index);
2285 goto set_error_page;
2287 goto read_single_page;
2289 ret = f2fs_init_compress_ctx(&cc);
2291 goto set_error_page;
2293 f2fs_compress_ctx_add_page(&cc, page);
2300 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2301 &bio, &last_block_in_bio, rac);
2303 #ifdef CONFIG_F2FS_FS_COMPRESSION
2307 zero_user_segment(page, 0, PAGE_SIZE);
2310 #ifdef CONFIG_F2FS_FS_COMPRESSION
2316 #ifdef CONFIG_F2FS_FS_COMPRESSION
2317 if (f2fs_compressed_file(inode)) {
2319 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2320 ret = f2fs_read_multi_pages(&cc, &bio,
2323 rac != NULL, false);
2324 f2fs_destroy_compress_ctx(&cc);
2330 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2334 static int f2fs_read_data_page(struct file *file, struct page *page)
2336 struct inode *inode = page_file_mapping(page)->host;
2339 trace_f2fs_readpage(page, DATA);
2341 if (!f2fs_is_compress_backend_ready(inode)) {
2346 /* If the file has inline data, try to read it directly */
2347 if (f2fs_has_inline_data(inode))
2348 ret = f2fs_read_inline_data(inode, page);
2350 ret = f2fs_mpage_readpages(inode, NULL, page);
2354 static void f2fs_readahead(struct readahead_control *rac)
2356 struct inode *inode = rac->mapping->host;
2358 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2360 if (!f2fs_is_compress_backend_ready(inode))
2363 /* If the file has inline data, skip readpages */
2364 if (f2fs_has_inline_data(inode))
2367 f2fs_mpage_readpages(inode, rac, NULL);
2370 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2372 struct inode *inode = fio->page->mapping->host;
2373 struct page *mpage, *page;
2374 gfp_t gfp_flags = GFP_NOFS;
2376 if (!f2fs_encrypted_file(inode))
2379 page = fio->compressed_page ? fio->compressed_page : fio->page;
2381 /* wait for GCed page writeback via META_MAPPING */
2382 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2384 if (fscrypt_inode_uses_inline_crypto(inode))
2388 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2389 PAGE_SIZE, 0, gfp_flags);
2390 if (IS_ERR(fio->encrypted_page)) {
2391 /* flush pending IOs and wait for a while in the ENOMEM case */
2392 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2393 f2fs_flush_merged_writes(fio->sbi);
2394 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2395 gfp_flags |= __GFP_NOFAIL;
2398 return PTR_ERR(fio->encrypted_page);
2401 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2403 if (PageUptodate(mpage))
2404 memcpy(page_address(mpage),
2405 page_address(fio->encrypted_page), PAGE_SIZE);
2406 f2fs_put_page(mpage, 1);
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewriting async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!IS_ENCRYPTED(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

	return false;
}
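/*
 * Example: an ipu_policy of (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_FSYNC)
 * enables in-place updates whenever SSR allocation is needed and for
 * fdatasync-marked pages, but not unconditionally.
 */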
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is a cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (f2fs_lfs_mode(sbi))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (IS_NOQUOTA(inode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
	}
	return false;
}
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (f2fs_should_update_outplace(inode, fio))
		return false;

	return f2fs_should_update_inplace(inode, fio);
}
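/*
 * f2fs_do_write_data_page() below picks between the two write paths:
 * in-place update (IPU) rewrites fio->old_blkaddr via
 * f2fs_inplace_write_data(), while out-of-place update (OPU) allocates a
 * new block via f2fs_outplace_write_data() as in normal LFS writing.
 */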
2495 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2497 struct page *page = fio->page;
2498 struct inode *inode = page->mapping->host;
2499 struct dnode_of_data dn;
2500 struct extent_info ei = {0,0,0};
2501 struct node_info ni;
2502 bool ipu_force = false;
2505 set_new_dnode(&dn, inode, NULL, NULL, 0);
2506 if (need_inplace_update(fio) &&
2507 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2508 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2510 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2511 DATA_GENERIC_ENHANCE))
2512 return -EFSCORRUPTED;
2515 fio->need_lock = LOCK_DONE;
	/* use trylock to avoid a deadlock between page->lock and f2fs_lock_op() */
2520 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2523 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2527 fio->old_blkaddr = dn.data_blkaddr;
2529 /* This page is already truncated */
2530 if (fio->old_blkaddr == NULL_ADDR) {
2531 ClearPageUptodate(page);
2532 clear_cold_data(page);
2536 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2537 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2538 DATA_GENERIC_ENHANCE)) {
2539 err = -EFSCORRUPTED;
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
2547 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2548 need_inplace_update(fio))) {
2549 err = f2fs_encrypt_one_page(fio);
2553 set_page_writeback(page);
2554 ClearPageError(page);
2555 f2fs_put_dnode(&dn);
2556 if (fio->need_lock == LOCK_REQ)
2557 f2fs_unlock_op(fio->sbi);
2558 err = f2fs_inplace_write_data(fio);
2560 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2561 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2562 if (PageWriteback(page))
2563 end_page_writeback(page);
2565 set_inode_flag(inode, FI_UPDATE_WRITE);
2567 trace_f2fs_do_write_data_page(fio->page, IPU);
2571 if (fio->need_lock == LOCK_RETRY) {
2572 if (!f2fs_trylock_op(fio->sbi)) {
2576 fio->need_lock = LOCK_REQ;
2579 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2583 fio->version = ni.version;
2585 err = f2fs_encrypt_one_page(fio);
2589 set_page_writeback(page);
2590 ClearPageError(page);
2592 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2593 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2595 /* LFS mode write path */
2596 f2fs_outplace_write_data(&dn, fio);
2597 trace_f2fs_do_write_data_page(page, OPU);
2598 set_inode_flag(inode, FI_APPEND_WRITE);
2599 if (page->index == 0)
2600 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2602 f2fs_put_dnode(&dn);
2604 if (fio->need_lock == LOCK_REQ)
2605 f2fs_unlock_op(fio->sbi);
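
/*
 * Sketch of the fio->need_lock states as used above (a reading aid, not
 * part of the original source):
 *
 *	LOCK_DONE  - caller already holds, or does not need, f2fs_lock_op()
 *	LOCK_REQ   - this function must take f2fs_lock_op() up front
 *	LOCK_RETRY - take it lazily via f2fs_trylock_op(); on contention,
 *		     return -EAGAIN so the caller can retry with LOCK_REQ
 */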

int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks,
				bool allow_balance)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long)i_size)
							>> PAGE_SHIFT;
	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.compr_blocks = compr_blocks,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
		.last_block = last_block,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages, to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index ||
			f2fs_verity_in_progress(inode) ||
			compr_blocks)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page, which holds the journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry/quota blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen for quota writes,
		 * which could otherwise race with discard.
		 */
		if (IS_NOQUOTA(inode))
			down_read(&sbi->node_write);

		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);

		if (IS_NOQUOTA(inode))
			up_read(&sbi->node_write);

		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		spin_lock(&F2FS_I(inode)->i_size_lock);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_cold_data(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
			!F2FS_I(inode)->cp_task && allow_balance)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted ? 1 : 0;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in mm translates EAGAIN, so it calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will then see the EIO error, which is
	 * critical for fsync() to report an atomic_write failure to the user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
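
/*
 * Return convention, mirroring ->writepage: 0 for a written or skipped page,
 * AOP_WRITEPAGE_ACTIVATE when the page was redirtied and is left locked for
 * the caller, and a negative errno otherwise.
 */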

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = page->mapping->host;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		goto out;

	if (f2fs_compressed_file(inode)) {
		if (f2fs_is_compressed_cluster(inode, page->index)) {
			redirty_page_for_writepage(wbc, page);
			return AOP_WRITEPAGE_ACTIVATE;
		}
	}
out:
#endif

	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
						wbc, FS_DATA_IO, 0, true);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that it writes cold data pages separately from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0, retry = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct bio *bio = NULL;
	sector_t last_block;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = mapping->host;
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.nr_rpages = 0,
		.cpages = NULL,
		.rbuf = NULL,
		.cbuf = NULL,
		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
		.private = NULL,
	};
#endif
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;
	int submitted = 0;
	int i;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	retry = 0;
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && !retry && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool need_readd;
readd:
			need_readd = false;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				ret = f2fs_init_compress_ctx(&cc);
				if (ret) {
					done = 1;
					break;
				}

				if (!f2fs_cluster_can_merge_page(&cc,
								page->index)) {
					ret = f2fs_write_multi_pages(&cc,
						&submitted, wbc, io_type);
					if (!ret)
						need_readd = true;
					goto result;
				}

				if (unlikely(f2fs_cp_error(sbi)))
					goto lock_page;

				if (f2fs_cluster_is_empty(&cc)) {
					void *fsdata = NULL;
					struct page *pagep;
					int ret2;

					ret2 = f2fs_prepare_compress_overwrite(
							inode, &pagep,
							page->index, &fsdata);
					if (ret2 < 0) {
						ret = ret2;
						done = 1;
						break;
					} else if (ret2 &&
						!f2fs_compress_write_end(inode,
								fsdata, page->index,
								1)) {
						retry = 1;
						break;
					}
				} else {
					goto lock_page;
				}
			}
#endif
			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
#ifdef CONFIG_F2FS_FS_COMPRESSION
lock_page:
#endif
			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				get_page(page);
				f2fs_compress_ctx_add_page(&cc, page);
				continue;
			}
#endif
			ret = f2fs_write_single_data_page(page, &submitted,
					&bio, &last_block, wbc, io_type,
					0, true);
			if (ret == AOP_WRITEPAGE_ACTIVATE)
				unlock_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
			nwritten += submitted;
			wbc->nr_to_write -= submitted;

			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					ret = 0;
					goto next;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
						goto retry_write;
					}
					goto next;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			}

			if (wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
next:
			if (need_readd)
				goto readd;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* flush remaining pages in the compress cluster */
	if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
		ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
		nwritten += submitted;
		wbc->nr_to_write -= submitted;
		if (ret) {
			done = 1;
			retry = 0;
		}
	}
	if (f2fs_compressed_file(inode))
		f2fs_destroy_compress_ctx(&cc);
#endif
	if (retry) {
		index = 0;
		end = -1;
		goto retry;
	}
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);
	/* submit the cached bio of an IPU write */
	if (bio)
		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);

	return ret;
}
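
/*
 * The walk above in brief: tag the range (for WB_SYNC_ALL), gather tagged
 * pages one pagevec at a time, and hand each page to
 * f2fs_write_single_data_page(); compressed inodes instead accumulate a
 * whole cluster in 'cc' and flush it through f2fs_write_multi_pages().
 */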

static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	/* to avoid a deadlock in the data flush path */
	if (F2FS_I(inode)->cp_task)
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;

	if (f2fs_need_compress_data(inode))
		return true;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}
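
/*
 * Example: background (WB_SYNC_NONE) writeback of a regular, non-quota file
 * always serializes on sbi->writepages, so concurrent flushers cannot
 * interleave their blocks on disk; WB_SYNC_ALL writeback serializes only
 * for compressible data or once min_seq_blocks dirty pages have piled up.
 */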

static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragmentation */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot rely on mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
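
/*
 * wb_sync_req[DATA] implements the "give a priority to WB_SYNC threads"
 * rule: while any WB_SYNC_ALL writeback is in flight, WB_SYNC_NONE callers
 * skip writing instead of competing for the same pages.
 */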

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (IS_NOQUOTA(inode))
		return;

	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
	if (to > i_size && !f2fs_verity_in_progress(inode)) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		f2fs_truncate_blocks(inode, i_size, true);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}

static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, 0, 0};
	int err = 0;
	int flag;

	/*
	 * If we already allocated all the blocks and there is no need to
	 * fill the page, we don't need to look up block addresses here.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC) &&
			!f2fs_verity_in_progress(inode))
		return 0;

	/* f2fs_lock_op avoids a race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_do_map_lock(sbi, flag, true);
		locked = true;
	}

restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_do_map_lock(sbi, flag, false);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto fail;
	}
	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}
	/*
	 * We should check this now to avoid a deadlock on the inode page
	 * and page #0. The locking rule for inline_data conversion is:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret;

		*fsdata = NULL;
		ret = f2fs_prepare_compress_overwrite(inode, pagep,
							index, fsdata);
		if (ret < 0) {
			err = ret;
			goto fail;
		} else if (ret) {
			return 0;
		}
	}
#endif
repeat:
	/*
	 * Do not use grab_cache_page_write_begin(), to avoid a deadlock in
	 * wait_for_stable_page(); we wait for writeback below under our own
	 * I/O control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* TODO: cluster can be compressed due to race with .writepage */

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
			!f2fs_verity_in_progress(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto fail;
		}
		err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, so copied is expected to
	 * be PAGE_SIZE as well. Otherwise, treat it as zero copied and let
	 * generic_perform_write() retry the copy through copied = 0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* overwrite compressed file */
	if (f2fs_compressed_file(inode) && fsdata) {
		f2fs_compress_write_end(inode, fsdata, page->index, copied);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

		if (pos + copied > i_size_read(inode) &&
				!f2fs_verity_in_progress(inode))
			f2fs_i_size_write(inode, pos + copied);
		return copied;
	}
#endif

	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode) &&
			!f2fs_verity_in_progress(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
		return 1;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}
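
/*
 * Worked example (illustrative): with a 4KB filesystem block
 * (i_blkbits == 12) on a device with 512-byte logical blocks, a DIO
 * request at offset 512 fails the first mask (512 & 0xfff != 0) but
 * passes the device mask (512 & 0x1ff == 0), so check_direct_IO()
 * returns 1 and the caller quietly falls back to buffered I/O.
 */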

static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kfree(dio);

	bio_endio(bio);
}

static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}
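
/*
 * The submit path above interposes on completion: f2fs_dio_end_io() drops
 * the in-flight DIO page count, restores the bio's original bi_end_io and
 * bi_private, frees the wrapper, and then completes the bio as usual.
 */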

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
			DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err == -EIOCBQUEUED) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
						count - iov_iter_count(iter));
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	} else {
		if (err > 0)
			f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
		else if (err == -EIOCBQUEUED)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
						count - iov_iter_count(iter));
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_cold_data(page);

	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return f2fs_drop_inmem_page(inode, page);

	f2fs_clear_page_private(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep its PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic-written page, keep its Private flag */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	clear_cold_data(page);
	f2fs_clear_page_private(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (PageSwapCache(page))
		return __set_page_dirty_nobuffers(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct dnode_of_data dn;
	sector_t start_idx, blknr = 0;
	int ret;

	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		return 0;

	if (dn.data_blkaddr != COMPRESS_ADDR) {
		dn.ofs_in_node += block - start_idx;
		blknr = f2fs_data_blkaddr(&dn);
		if (!__is_valid_data_blkaddr(blknr))
			blknr = 0;
	}

	f2fs_put_dnode(&dn);
	return blknr;
#else
	return 0;
#endif
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	sector_t blknr = 0;

	if (f2fs_has_inline_data(inode))
		goto out;

	/* make sure all the blocks are allocated */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	/* the block number must be less than F2FS's maximum */
	if (unlikely(block >= max_file_blocks(inode)))
		goto out;

	if (f2fs_compressed_file(inode)) {
		blknr = f2fs_bmap_compress(inode, block);
	} else {
		struct f2fs_map_blocks map;

		memset(&map, 0, sizeof(map));
		map.m_lblk = block;
		map.m_len = 1;
		map.m_next_pgofs = NULL;
		map.m_seg_type = NO_CHECK_TYPE;

		if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
			blknr = map.m_pblk;
	}
out:
	trace_f2fs_bmap(inode, block, blknr);
	return blknr;
}
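
/*
 * This backs bmap(2)-style lookups such as the FIBMAP ioctl: given a file
 * block index it returns the on-disk block number, or 0 for holes, inline
 * data, and blocks that live inside a compressed cluster.
 */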

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic-written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for the atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;

		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page)) {
		f2fs_set_page_private(newpage, page_private(page));
		f2fs_clear_page_private(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

#ifdef CONFIG_SWAP
static int check_swap_activate_fast(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	sector_t cur_lblock;
	sector_t last_lblock;
	sector_t pblock;
	sector_t lowest_pblock = -1;
	sector_t highest_pblock = 0;
	int nr_extents = 0;
	unsigned long nr_pblocks;
	u64 len;
	int ret;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	cur_lblock = 0;
	last_lblock = bytes_to_blks(inode, i_size_read(inode));
	len = i_size_read(inode);

	while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
		struct f2fs_map_blocks map;
		pgoff_t next_pgofs;

		cond_resched();

		memset(&map, 0, sizeof(map));
		map.m_lblk = cur_lblock;
		map.m_len = bytes_to_blks(inode, len) - cur_lblock;
		map.m_next_pgofs = &next_pgofs;
		map.m_seg_type = NO_CHECK_TYPE;

		ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
		if (ret)
			goto err_out;

		/* hole */
		if (!(map.m_flags & F2FS_MAP_FLAGS))
			goto err_out;

		pblock = map.m_pblk;
		nr_pblocks = map.m_len;

		if (cur_lblock + nr_pblocks >= sis->max)
			nr_pblocks = sis->max - cur_lblock;

		if (cur_lblock) {	/* exclude the header page */
			if (pblock < lowest_pblock)
				lowest_pblock = pblock;
			if (pblock + nr_pblocks - 1 > highest_pblock)
				highest_pblock = pblock + nr_pblocks - 1;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		cur_lblock += nr_pblocks;
	}
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	return ret;
err_out:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}
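
/*
 * The fast path above can trust f2fs_map_blocks() extents directly because
 * each filesystem block is exactly one page; the generic path below must
 * instead probe every block of every page with bmap() to prove that whole
 * pages are contiguous and page-aligned on disk.
 */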

/* Copied from generic_swapfile_activate() to check for holes */
static int check_swap_activate(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	if (PAGE_SIZE == F2FS_BLKSIZE)
		return check_swap_activate_fast(sis, swap_file, span);

	blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = bytes_to_blks(inode, i_size_read(inode));
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;
		sector_t block = 0;
		int err = 0;

		cond_resched();

		block = probe_block;
		err = bmap(inode, &block);
		if (err || !block)
			goto bad_bmap;
		first_block = block;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			block = probe_block + block_in_page;
			err = bmap(inode, &block);
			if (err || !block)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - inode->i_blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}
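
/*
 * Example: with 16KB pages and F2FS's fixed 4KB block size,
 * blocks_per_page == 4, so a swapfile page is usable only if four
 * consecutive file blocks map to four consecutive, page-aligned disk
 * blocks; any discontiguity makes the loop above reprobe one block later.
 */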

static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
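
/*
 * The mempool keeps NUM_PREALLOC_POST_READ_CTXS contexts in reserve so a
 * read bio can always get a post-read context (for decryption, verity or
 * decompression) even under memory pressure, at the cost of a small fixed
 * allocation at init time.
 */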

int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
		!f2fs_sb_has_verity(sbi) &&
		!f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}

int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
			sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}