block: ublk_drv: cleanup 'struct ublk_map_data'
authorMing Lei <ming.lei@redhat.com>
Thu, 30 Mar 2023 11:36:24 +0000 (19:36 +0800)
committerJens Axboe <axboe@kernel.dk>
Mon, 3 Apr 2023 01:22:55 +0000 (19:22 -0600)
'struct ublk_map_data' is passed to ublk_copy_user_pages()
for copying data between userspace buffer and request pages.

What matters here is the userspace buffer address/length and the
'struct request', so replace the ->io field with the user buffer address,
and rename max_bytes to len.

Meanwhile, remove the 'ubq' field from ublk_map_data, since it isn't used
any more.

Then code becomes more readable.

Reviewed-by: Ziyang Zhang <ZiyangZhang@linux.alibaba.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index a40edc4..1223fcb 100644 (file)
@@ -420,10 +420,9 @@ static const struct block_device_operations ub_fops = {
 #define UBLK_MAX_PIN_PAGES     32
 
 struct ublk_map_data {
-       const struct ublk_queue *ubq;
        const struct request *rq;
-       const struct ublk_io *io;
-       unsigned max_bytes;
+       unsigned long   ubuf;
+       unsigned int    len;
 };
 
 struct ublk_io_iter {
@@ -483,14 +482,14 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
 static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
 {
        const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
-       const unsigned long start_vm = data->io->addr;
+       const unsigned long start_vm = data->ubuf;
        unsigned int done = 0;
        struct ublk_io_iter iter = {
                .pg_off = start_vm & (PAGE_SIZE - 1),
                .bio    = data->rq->bio,
                .iter   = data->rq->bio->bi_iter,
        };
-       const unsigned int nr_pages = round_up(data->max_bytes +
+       const unsigned int nr_pages = round_up(data->len +
                        (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
 
        while (done < nr_pages) {
@@ -503,13 +502,13 @@ static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
                                iter.pages);
                if (iter.nr_pages <= 0)
                        return done == 0 ? iter.nr_pages : done;
-               len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+               len = ublk_copy_io_pages(&iter, data->len, to_vm);
                for (i = 0; i < iter.nr_pages; i++) {
                        if (to_vm)
                                set_page_dirty(iter.pages[i]);
                        put_page(iter.pages[i]);
                }
-               data->max_bytes -= len;
+               data->len -= len;
                done += iter.nr_pages;
        }
 
@@ -538,15 +537,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
         */
        if (ublk_need_map_req(req)) {
                struct ublk_map_data data = {
-                       .ubq    =       ubq,
                        .rq     =       req,
-                       .io     =       io,
-                       .max_bytes =    rq_bytes,
+                       .ubuf   =       io->addr,
+                       .len    =       rq_bytes,
                };
 
                ublk_copy_user_pages(&data, true);
 
-               return rq_bytes - data.max_bytes;
+               return rq_bytes - data.len;
        }
        return rq_bytes;
 }
@@ -559,17 +557,16 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
 
        if (ublk_need_unmap_req(req)) {
                struct ublk_map_data data = {
-                       .ubq    =       ubq,
                        .rq     =       req,
-                       .io     =       io,
-                       .max_bytes =    io->res,
+                       .ubuf   =       io->addr,
+                       .len    =       io->res,
                };
 
                WARN_ON_ONCE(io->res > rq_bytes);
 
                ublk_copy_user_pages(&data, false);
 
-               return io->res - data.max_bytes;
+               return io->res - data.len;
        }
        return rq_bytes;
 }