// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Christoph Hellwig.
 */
#include "xfs.h"

/* Number of bio_vecs needed to map "count" bytes, capped at BIO_MAX_VECS. */
static inline unsigned int bio_max_vecs(unsigned int count)
{
	return bio_max_segs(howmany(count, PAGE_SIZE));
}

static void
xfs_flush_bdev_async_endio(
	struct bio *bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a request for an async cache flush to run. If the request queue does
 * not require flush operations, just skip it altogether. If the caller needs
 * to wait for the flush completion at a later point in time, they must supply
 * a valid completion. This will be signalled when the flush completes. The
 * caller never sees the bio that is issued here.
 */
void
xfs_flush_bdev_async(
	struct bio *bio,
	struct block_device *bdev,
	struct completion *done)
{
	struct request_queue *q = bdev->bd_disk->queue;

	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		/* Device has no volatile write cache: nothing to flush. */
		complete(done);
		return;
	}

	bio_init(bio, NULL, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
	bio->bi_private = done;
	bio->bi_end_io = xfs_flush_bdev_async_endio;
	submit_bio(bio);
}
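
/*
 * Usage sketch (hypothetical, not a caller that exists in this file): one
 * plausible way to drive xfs_flush_bdev_async(), assuming the caller owns
 * storage for both the bio and the completion. Identifier names below are
 * illustrative only.
 *
 *	struct completion flush_done;
 *	struct bio flush_bio;
 *
 *	init_completion(&flush_done);
 *	xfs_flush_bdev_async(&flush_bio, bdev, &flush_done);
 *	... work that may proceed while the flush is in flight ...
 *	wait_for_completion(&flush_done);
 */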

/* Synchronously read or write a kernel buffer to or from a block device. */
int
xfs_rw_bdev(
	struct block_device *bdev,
	sector_t sector,
	unsigned int count,
	char *data,
	unsigned int op)
{
	unsigned int is_vmalloc = is_vmalloc_addr(data);
	unsigned int left = count;
	int error;
	struct bio *bio;

	/* Make vmalloc'ed write data cache-coherent before the device reads it. */
	if (is_vmalloc && op == REQ_OP_WRITE)
		flush_kernel_vmap_range(data, count);

	bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = op | REQ_META | REQ_SYNC;

	do {
		struct page *page = kmem_to_page(data);
		unsigned int off = offset_in_page(data);
		unsigned int len = min_t(unsigned, left, PAGE_SIZE - off);

		/* When a bio fills up, chain a new one to it and submit the full one. */
		while (bio_add_page(bio, page, len, off) != len) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio->bi_opf = prev->bi_opf;
			bio_chain(prev, bio);
			submit_bio(prev);
		}

		data += len;
		left -= len;
	} while (left > 0);

	error = submit_bio_wait(bio);
	bio_put(bio);

	if (is_vmalloc && op == REQ_OP_READ)
		invalidate_kernel_vmap_range(data, count);
	return error;
}
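
/*
 * Usage sketch (hypothetical, not a caller that exists in this file): a
 * synchronous read of "count" bytes from byte offset "offset" into a
 * kvmalloc'ed buffer, assuming offset and count are sector aligned. The
 * "buf" and "offset" names are illustrative only.
 *
 *	char *buf = kvmalloc(count, GFP_KERNEL);
 *	int error;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	error = xfs_rw_bdev(bdev, offset >> SECTOR_SHIFT, count, buf,
 *			REQ_OP_READ);
 *	... on success, buf holds count bytes read from the device ...
 *	kvfree(buf);
 */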