// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_THROUGH	(1U << 28)
#define IOMAP_DIO_NEED_SYNC	(1U << 29)
#define IOMAP_DIO_WRITE		(1U << 30)
#define IOMAP_DIO_DIRTY		(1U << 31)
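
/*
 * The low bits of dio->flags carry the public ->end_io flags
 * (IOMAP_DIO_UNWRITTEN, IOMAP_DIO_COW) declared in include/linux/iomap.h,
 * which is why the private flags above are kept in the top bits.
 */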
struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};
static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	struct kiocb *iocb = dio->iocb;

	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
		bio_set_polled(bio, iocb);
		WRITE_ONCE(iocb->private, bio);
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
	}
	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
		kiocb_invalidate_post_direct_write(iocb, dio->size);

	inode_dio_end(file_inode(iocb->ki_filp));
	if (ret > 0) {
		iocb->ki_pos += ret;

		/*
		 * If this is a DSYNC write, make sure we push it to stable
		 * storage now that we've written data.
		 */
		if (dio->flags & IOMAP_DIO_NEED_SYNC)
			ret = generic_write_sync(iocb, ret);
		if (ret > 0)
			ret += dio->done_before;
	}
	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
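
/*
 * Usage note: __iomap_dio_rw() and iomap_dio_complete() are exported as a
 * pair so that callers which must do work between submission and completion,
 * for example dropping an inode lock before waiting, can split the two
 * steps; iomap_dio_rw() further below combines them for the common case.
 */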
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}
/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}
void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
	struct kiocb *iocb = dio->iocb;

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
	if (!atomic_dec_and_test(&dio->ref))
		goto release_bio;

	/*
	 * Synchronous dio, the task itself will handle any completion work
	 * that is needed after IO.  All we need to do is wake the task.
	 */
	if (dio->wait_for_completion) {
		struct task_struct *waiter = dio->submit.waiter;

		WRITE_ONCE(dio->submit.waiter, NULL);
		blk_wake_io_task(waiter);
		goto release_bio;
	}

	/* Read completion can always complete inline. */
	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WRITE_ONCE(iocb->private, NULL);
		iomap_dio_complete_work(&dio->aio.work);
		goto release_bio;
	}

	/*
	 * Async DIO completion that requires filesystem level completion work
	 * gets punted to a work queue to complete as the operation may require
	 * more IO to be issued to finalise filesystem metadata changes or
	 * guarantee data integrity.
	 */
	INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
	queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
			&dio->aio.work);

release_bio:
	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
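
/*
 * Illustrative sketch (not part of the iomap code; the "myfs" names are
 * hypothetical): a filesystem that allocates bios from its own bio_set via
 * struct iomap_dio_ops and submits them through ->submit_io is expected to
 * hand each bio back to iomap_dio_bio_end_io() from its private completion
 * handler once its own per-bio work is finished, roughly:
 *
 *	static void myfs_dio_bio_end_io(struct bio *bio)
 *	{
 *		myfs_verify_checksums(bio);	// hypothetical fs-side work
 *		iomap_dio_bio_end_io(bio);	// complete the iomap dio side
 *	}
 */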
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}
/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_THROUGH flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE))
		return REQ_OP_READ;

	opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;

	return opflags;
}
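
/*
 * Worked example of the write-through logic above: an O_DSYNC overwrite of
 * an already-mapped, clean extent on a FUA-capable device gets use_fua set
 * and is issued as REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_FUA, so no
 * separate cache flush is needed at completion.  Any write that cannot use
 * FUA clears IOMAP_DIO_WRITE_THROUGH, which keeps IOMAP_DIO_NEED_SYNC set
 * and forces generic_write_sync() at completion time.
 */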
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;
	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device either supports FUA or doesn't have
		 * a volatile write cache.  This allows us to avoid cache flushes
		 * on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
			use_fua = true;
	}
	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);
	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);
	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}
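
/*
 * Worked example of the sub-block zeroing above: with a 4096-byte filesystem
 * block on a 512-byte logical block device, a write that ends at file offset
 * 4608 leaves pad = 4608 & 4095 = 512, so the remaining 4096 - 512 = 3584
 * bytes of that block are written from the zero page.  The same arithmetic,
 * applied at pos - pad, covers the head of the first block.
 */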
static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}
static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}
static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write.  This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes.  In that case, we still need to do a full data sync
 * completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to the
 * number of bytes previously transferred.  The request will then complete with
 * the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;
	loff_t ret = 0;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;

		ret = kiocb_write_and_wait(iocb, iomi.len);
		if (ret)
			goto out_free_dio;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
			ret = -EAGAIN;
			if (iomi.pos >= dio->i_size ||
			    iomi.pos + iomi.len > dio->i_size)
				goto out_free_dio;
			iomi.flags |= IOMAP_OVERWRITE_ONLY;
		}
		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try using
			 * WRITE_THROUGH for this IO.  This flag requires either
			 * FUA writes through the device's write cache, or a
			 * normal write to a device without a volatile write
			 * cache.  For the former, any non-FUA write that occurs
			 * will clear this flag, hence we know before completion
			 * whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_THROUGH;
		}

		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		ret = kiocb_invalidate_pages(iocb, iomi.len);
		if (ret) {
			if (ret != -EAGAIN) {
				trace_iomap_dio_invalidate_fail(inode, iomi.pos,
								iomi.len);
				ret = -ENOTBLK;
			}
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}
	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);
	/*
	 * If all the writes we issued were already written through to the
	 * media, we don't need to flush the cache on IO completion.  Clear the
	 * sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
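
/*
 * Caller-side sketch (illustrative only; the "myfs" names are hypothetical):
 * a filesystem typically wires iomap_dio_rw() into its ->write_iter method
 * and, when it passes IOMAP_DIO_PARTIAL, retries after faulting in the
 * missing user pages, feeding the bytes already transferred back in as
 * @done_before:
 *
 *	ssize_t written = 0, ret;
 *
 *	do {
 *		pagefault_disable();
 *		ret = iomap_dio_rw(iocb, from, &myfs_iomap_ops, NULL,
 *				   IOMAP_DIO_PARTIAL, NULL, written);
 *		pagefault_enable();
 *		if (ret > 0)
 *			written = ret;
 *	} while (ret == -EFAULT && iov_iter_count(from) &&
 *		 fault_in_iov_iter_readable(from, SIZE_MAX) == 0);
 *
 * Real users differ in detail; see the gfs2 and btrfs direct I/O paths for
 * complete examples, including the locking they take around each attempt.
 */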