// SPDX-License-Identifier: GPL-2.0
/*
 * fs/xfs/xfs_bio_io.c
 *
 * Copyright (c) 2019 Christoph Hellwig.
 */
#include "xfs.h"

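/*
 * Number of bio vectors needed to map @count bytes: howmany() rounds the
 * byte count up to whole pages, and bio_max_segs() clamps the result to
 * BIO_MAX_VECS, the most page vectors a single bio can carry.
 */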
static inline unsigned int bio_max_vecs(unsigned int count)
{
        return bio_max_segs(howmany(count, PAGE_SIZE));
}

static void
xfs_flush_bdev_async_endio(
        struct bio      *bio)
{
        complete(bio->bi_private);
}

/*
 * Submit a request for an async cache flush to run. If the request queue does
 * not require flush operations, just skip it altogether. If the caller needs
 * to wait for the flush completion at a later point in time, it must supply a
 * valid completion, which will be signalled when the flush completes. The
 * caller provides the storage for the bio but otherwise never sees it: the
 * bio must not be touched between submission here and completion of @done.
 */
void
xfs_flush_bdev_async(
        struct bio              *bio,
        struct block_device     *bdev,
        struct completion       *done)
{
        struct request_queue    *q = bdev->bd_disk->queue;

        if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                complete(done);
                return;
        }

        bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
        bio->bi_private = done;
        bio->bi_end_io = xfs_flush_bdev_async_endio;

        submit_bio(bio);
}
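
/*
 * Editorial usage sketch, not part of the original file: one way a caller
 * might drive xfs_flush_bdev_async(). The caller embeds a bio and an
 * on-stack completion, kicks off the flush, overlaps other work with it,
 * and then waits. The function name example_flush_and_wait() is
 * hypothetical; waiting on @done before the bio goes out of scope is what
 * the interface above requires.
 */
#if 0   /* illustration only */
static void example_flush_and_wait(struct block_device *bdev)
{
        struct bio              flush_bio;
        DECLARE_COMPLETION_ONSTACK(done);

        xfs_flush_bdev_async(&flush_bio, bdev, &done);
        /* ... do other work while the preflush is in flight ... */
        wait_for_completion(&done);
}
#endif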

int
xfs_rw_bdev(
        struct block_device     *bdev,
        sector_t                sector,
        unsigned int            count,
        char                    *data,
        unsigned int            op)
{
        unsigned int            is_vmalloc = is_vmalloc_addr(data);
        unsigned int            left = count;
        int                     error;
        struct bio              *bio;

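        /*
         * vmalloc()ed buffers can have aliases of their pages in the
         * kernel's virtual cache. Flush them before a write so the
         * device reads the data the caller just stored through the
         * vmap alias.
         */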
        if (is_vmalloc && op == REQ_OP_WRITE)
                flush_kernel_vmap_range(data, count);

        bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC,
                        GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;

        do {
                struct page     *page = kmem_to_page(data);
                unsigned int    off = offset_in_page(data);
                unsigned int    len = min_t(unsigned, left, PAGE_SIZE - off);

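                /*
                 * If the page does not fit into the current bio, allocate
                 * a new bio for the rest of the range at the next sector,
                 * chain it to the full bio so the final submit_bio_wait()
                 * covers the whole chain, and submit the full bio.
                 */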
                while (bio_add_page(bio, page, len, off) != len) {
                        struct bio      *prev = bio;

                        bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left),
                                        prev->bi_opf, GFP_KERNEL);
                        bio->bi_iter.bi_sector = bio_end_sector(prev);
                        bio_chain(prev, bio);

                        submit_bio(prev);
                }

                data += len;
                left -= len;
        } while (left > 0);

        error = submit_bio_wait(bio);
        bio_put(bio);

        /*
         * The loop above advanced @data past the end of the buffer, so
         * step back to the start of the range before invalidating the
         * vmap aliases, so that a subsequent read through the vmalloc()
         * mapping sees the data the device just wrote.
         */
        if (is_vmalloc && op == REQ_OP_READ)
                invalidate_kernel_vmap_range(data - count, count);
        return error;
}
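
/*
 * Editorial usage sketch, not part of the original file: xfs_rw_bdev()
 * reads or writes one contiguous byte range with a single call, splitting
 * it into as many chained bios as the block layer requires. The buffer may
 * be kmalloc()ed or vmalloc()ed. The function name example_read_region()
 * and the 64k size below are hypothetical.
 */
#if 0   /* illustration only */
static int example_read_region(struct block_device *bdev, sector_t sector)
{
        unsigned int    count = 65536;
        char            *data;
        int             error;

        data = vmalloc(count);
        if (!data)
                return -ENOMEM;

        error = xfs_rw_bdev(bdev, sector, count, data, REQ_OP_READ);
        /* ... consume the data on success ... */

        vfree(data);
        return error;
}
#endif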