// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;
	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio, *bounce_bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto cleanup;

	/*
	 * We link the bounce buffer in and could have to traverse it later, so
	 * we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	struct bio *bio, *bounce_bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	/*
	 * Subtle: if we end up needing to bounce a bio, it would normally
	 * disappear when its bi_end_io is run.  However, we need the original
	 * bio for the unmap, so grab an extra reference to it
	 */
	bio_get(bio);

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto out_put_orig;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

out_put_orig:
	bio_put(bio);
out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
	bio_put(bio);
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(*bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

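/*
 * Illustrative sketch (not part of the original file): the calling convention
 * of blk_rq_append_bio(). The bio is passed by reference because
 * blk_queue_bounce() may substitute a bounce bio, so the caller must keep
 * using whatever pointer comes back. The wrapper name (example_append_bio)
 * is hypothetical; freeing the bio after the request completes remains the
 * caller's job, just as the mapping helpers in this file do it from their
 * bi_end_io handlers or at unmap time.
 */
static int example_append_bio(struct request *rq, struct bio *bio)
{
	int ret;

	bio->bi_opf |= req_op(rq);	/* the bio op must match the request */

	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		/* on failure *bio was restored, so this puts the original */
		bio_put(bio);
		return ret;
	}

	return 0;
}
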
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

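/*
 * Illustrative sketch (not from the original file): a passthrough-ioctl style
 * caller mapping a user iovec with blk_rq_map_user_iov(). The function name
 * (example_map_user_vec) and the elided submission step are hypothetical;
 * the point is the pairing of import_iovec(), blk_rq_map_user_iov() and
 * blk_rq_unmap_user() on the saved rq->bio.
 */
static int example_map_user_vec(struct request_queue *q, struct request *rq,
				const struct iovec __user *uvec, int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
	struct iov_iter iter;
	struct bio *bio;
	ssize_t bytes;
	int ret;

	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			     &iov, &iter);
	if (bytes < 0)
		return bytes;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);
	if (ret)
		return ret;

	/*
	 * Remember the head of the mapped bio list now; completion may change
	 * rq->bio, and blk_rq_unmap_user() needs the original.
	 */
	bio = rq->bio;

	/* ... submit rq and wait for completion here ... */

	return blk_rq_unmap_user(bio);
}
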
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

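/*
 * Illustrative sketch (not from the original file): using a caller-provided
 * struct rq_map_data so the mapping copies through pre-allocated pages (the
 * way the sg driver uses its reserve buffer) instead of pages allocated in
 * bio_copy_user_iov(). The helper name (example_map_reserved) is
 * hypothetical; nr_pages page groups of order page_order are assumed to have
 * been allocated by the caller.
 */
static int example_map_reserved(struct request_queue *q, struct request *rq,
				struct page **pages, int nr_pages,
				int page_order,
				void __user *ubuf, unsigned long len)
{
	struct rq_map_data map_data = {
		.pages		= pages,
		.nr_entries	= nr_pages,
		.page_order	= page_order,
		.offset		= 0,
	};

	/*
	 * With map_data set, blk_rq_map_user() always takes the copy path in
	 * bio_copy_user_iov() and fills the supplied pages rather than
	 * allocating its own.
	 */
	return blk_rq_map_user(q, rq, &map_data, ubuf, len, GFP_KERNEL);
}
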
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		if (bio->bi_private) {
			ret2 = bio_uncopy_user(mapped_bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_unmap_user(mapped_bio);
		}

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

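/*
 * Illustrative sketch (not from the original file): mapping a single user
 * buffer for a passthrough request, SG_IO style, then unmapping with the
 * original rq->bio as the kernel-doc above requires. The function name
 * (example_user_pt_io) is hypothetical, blk_execute_rq() is assumed to have
 * the (queue, disk, request, at_head) signature of this kernel generation,
 * and decoding the command result is driver specific and omitted.
 */
static int example_user_pt_io(struct request_queue *q, void __user *ubuf,
			      unsigned long len, bool to_device)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, to_device ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;	/* keep the original bio for blk_rq_unmap_user() */

	blk_execute_rq(q, NULL, rq, 0);

	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}
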
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    bios.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

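/*
 * Illustrative sketch (not from the original file): issuing a passthrough
 * request backed by a kernel buffer. The function name (example_kern_pt_io)
 * is hypothetical, blk_execute_rq() is assumed to have the
 * (queue, disk, request, at_head) signature of this kernel generation, and
 * result decoding is left to the driver.
 */
static int example_kern_pt_io(struct request_queue *q, void *buf,
			      unsigned int len, bool to_device)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, to_device ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/*
	 * blk_rq_map_kern() picks bio_map_kern() or bio_copy_kern() itself,
	 * so buf may be a vmalloc address or even on the stack; a bounce copy
	 * is used whenever the buffer cannot be mapped directly.
	 */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}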