/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request. Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
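
/*
 * Example (sketch, assumptions noted inline): the mapping helpers below
 * attach each bio they build through blk_rq_append_bio(); a driver with
 * an already-built bio could append it the same way. The helper name is
 * illustrative and the request is assumed to be a passthrough request.
 */
static int example_append_bio(struct request *rq, struct bio *bio)
{
	int ret = blk_rq_append_bio(rq, bio);

	if (ret)	/* -EINVAL: bio cannot merge within driver limits */
		bio_endio(bio);
	return ret;
}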

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}
	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
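
/*
 * Example (sketch): a vectored caller, e.g. SG_IO with iovec_count set,
 * builds the iov_iter with import_iovec() and hands it to
 * blk_rq_map_user_iov(). The function name and parameters here are
 * illustrative; request setup and submission are elided.
 */
static int example_map_user_vec(struct request_queue *q, struct request *rq,
				const struct iovec __user *uvec, int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter i;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			   ARRAY_SIZE(iovstack), &iov, &i);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
	kfree(iov);	/* import_iovec() NULLs iov if the stack array was used */
	return ret;
}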

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
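
/*
 * Example (sketch): the map/execute/unmap pairing the description above
 * requires. rq->bio is saved before submission because completion may
 * change it; the saved pointer, not rq->bio, goes to blk_rq_unmap_user().
 * Command setup and error paths are elided; the helper name is illustrative.
 */
static int example_rw_user_buf(struct request_queue *q, struct request *rq,
			       void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;				/* save the original bio */
	blk_execute_rq(q, NULL, rq, 0);		/* synchronous submission */

	return blk_rq_unmap_user(bio);		/* still in process context */
}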

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
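
/*
 * Example (sketch): per the description above, blk_rq_map_kern() may be
 * called repeatedly to append several kernel buffers to one passthrough
 * request; each call goes through blk_rq_append_bio(). Names and the
 * two-buffer layout are illustrative; request setup is elided.
 */
static int example_map_two_bufs(struct request_queue *q, struct request *rq,
				void *hdr, unsigned int hdr_len,
				void *data, unsigned int data_len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
	if (ret)
		return ret;

	/* second buffer is merged onto the same request */
	return blk_rq_map_kern(q, rq, data, data_len, GFP_KERNEL);
}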