block: Allow mapping of vmalloc-ed buffers
author Damien Le Moal <damien.lemoal@wdc.com>
Mon, 1 Jul 2019 05:09:15 +0000 (14:09 +0900)
committer Jens Axboe <axboe@kernel.dk>
Fri, 12 Jul 2019 02:04:36 +0000 (20:04 -0600)
To allow the SCSI subsystem's scsi_execute_req() function to issue
requests using large buffers, which are better allocated with vmalloc()
than with kmalloc(), modify bio_map_kern() to accept buffers allocated
with vmalloc().
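
As a rough illustration (the device handle, command bytes, buffer size
and function name below are made up for the example and are not part of
this patch), a caller could now hand a vmalloc-ed buffer directly to
scsi_execute_req():

    /*
     * Illustrative sketch only: issue a SCSI read-type command into a
     * large, vmalloc-ed buffer. Only the allocation choice matters here;
     * the CDB and sizes are placeholders.
     */
    #include <linux/vmalloc.h>
    #include <scsi/scsi_device.h>

    static int example_large_read(struct scsi_device *sdev, size_t buflen)
    {
            unsigned char cmd[16] = { };    /* placeholder CDB */
            struct scsi_sense_hdr sshdr;
            void *buf;
            int ret;

            /* Large buffer: vmalloc() avoids a high-order kmalloc(). */
            buf = vmalloc(buflen);
            if (!buf)
                    return -ENOMEM;

            /* With this change, bio_map_kern() can map the vmalloc-ed pages. */
            ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, buflen,
                                   &sshdr, 30 * HZ, 3, NULL);

            vfree(buf);
            return ret;
    }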

To do so, detect vmalloc-ed buffers using is_vmalloc_addr(). For
vmalloc-ed buffers, flush the buffer using flush_kernel_vmap_range(),
use vmalloc_to_page() instead of virt_to_page() to obtain the pages of
the buffer, and invalidate the buffer addresses with
invalidate_kernel_vmap_range() on completion of read BIOs. This last
step is handled by the new helper bio_invalidate_vmalloc_pages(), whose
body is compiled only if the architecture defines
ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE, that is, only if the architecture
actually needs the invalidation done.
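
For reference, the cache-maintenance pairing this relies on looks as
follows (a minimal sketch; submit_read() is a hypothetical stand-in for
the actual BIO submission and completion path):

    void *buf = vmalloc(len);

    /*
     * Before submission: write back dirty cache lines of the vmalloc
     * alias so data read through the pages' other mappings (or by the
     * device) is current.
     */
    flush_kernel_vmap_range(buf, len);

    submit_read(buf, len);          /* hypothetical I/O submission + wait */

    /*
     * On read completion: drop now-stale cache lines of the vmalloc
     * alias so CPU reads through buf see the newly written data.
     */
    invalidate_kernel_vmap_range(buf, len);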

Fixes: 515ce6061312 ("scsi: sd_zbc: Fix sd_zbc_report_zones() buffer allocation")
Fixes: e76239a3748c ("block: add a report_zones method")
Cc: stable@vger.kernel.org
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/bio.c b/block/bio.c
index 29cd6cf4da5131bb2b8d1d19c671f449350977ba..299a0e7651ec00336f8b41b9d515eabc7d65ac0e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -16,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
 #include <linux/blk-cgroup.h>
+#include <linux/highmem.h>
 
 #include <trace/events/block.h>
 #include "blk.h"
@@ -1441,8 +1442,22 @@ void bio_unmap_user(struct bio *bio)
        bio_put(bio);
 }
 
+static void bio_invalidate_vmalloc_pages(struct bio *bio)
+{
+#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+       if (bio->bi_private && !op_is_write(bio_op(bio))) {
+               unsigned long i, len = 0;
+
+               for (i = 0; i < bio->bi_vcnt; i++)
+                       len += bio->bi_io_vec[i].bv_len;
+               invalidate_kernel_vmap_range(bio->bi_private, len);
+       }
+#endif
+}
+
 static void bio_map_kern_endio(struct bio *bio)
 {
+       bio_invalidate_vmalloc_pages(bio);
        bio_put(bio);
 }
 
@@ -1463,6 +1478,8 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
+       bool is_vmalloc = is_vmalloc_addr(data);
+       struct page *page;
        int offset, i;
        struct bio *bio;
 
@@ -1470,6 +1487,11 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        if (!bio)
                return ERR_PTR(-ENOMEM);
 
+       if (is_vmalloc) {
+               flush_kernel_vmap_range(data, len);
+               bio->bi_private = data;
+       }
+
        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;
@@ -1480,7 +1502,11 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                if (bytes > len)
                        bytes = len;
 
-               if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+               if (!is_vmalloc)
+                       page = virt_to_page(data);
+               else
+                       page = vmalloc_to_page(data);
+               if (bio_add_pc_page(q, bio, page, bytes,
                                    offset) < bytes) {
                        /* we don't support partial mappings */
                        bio_put(bio);