// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)
struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	if (ctx->io_bl && bgid < BGID_ARRAY)
		return &ctx->io_bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
}
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}
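/*
 * Illustrative sketch (not part of this file): the cflags value produced
 * above ends up in the CQE, and userspace recovers the buffer ID from it.
 * IORING_CQE_F_BUFFER and IORING_CQE_BUFFER_SHIFT are uapi constants; the
 * helper name is made up for the example.
 *
 *	static inline unsigned int example_cqe_buffer_id(const struct io_uring_cqe *cqe)
 *	{
 *		if (!(cqe->flags & IORING_CQE_F_BUFFER))
 *			return (unsigned int)-1;	// no buffer was selected
 *		return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *	}
 */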
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}
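/*
 * Userspace side of the ring protocol consumed above, as an illustrative
 * sketch (not part of this file), assuming the liburing helpers
 * io_uring_buf_ring_add()/_advance() and an already registered ring 'br'
 * with 'entries' slots over a 'bufs' array of BUF_SIZE buffers. Buffers are
 * written at the tail and then published with a release store of br->tail,
 * which pairs with the smp_load_acquire() on the kernel side.
 *
 *	int mask = io_uring_buf_ring_mask(entries);
 *
 *	for (int i = 0; i < entries; i++)
 *		io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i, mask, i);
 *	io_uring_buf_ring_advance(br, entries);	// release-publish the new tail
 */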
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->buf_nr_pages)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}
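/*
 * Illustrative sketch (not part of this file): a request opts into the
 * selection path above by setting IOSQE_BUFFER_SELECT and naming the group
 * in sqe->buf_group; a zero length lets the selected buffer's own length
 * apply, per the *len handling in the select helpers. 'ring', 'sockfd' and
 * 'bgid' are assumed to exist in the caller.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 */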
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	int i;

	ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
				GFP_KERNEL);
	if (!ctx->io_bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
		ctx->io_bl[i].bgid = i;
	}

	return 0;
}
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->buf_nr_pages) {
		int j;

		i = bl->buf_ring->tail - bl->head;
		for (j = 0; j < bl->buf_nr_pages; j++)
			unpin_user_page(bl->buf_pages[j]);
		kvfree(bl->buf_pages);
		bl->buf_pages = NULL;
		bl->buf_nr_pages = 0;
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree(bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->buf_nr_pages)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	if (ret < 0)
		req_set_fail(req);

	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}
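/*
 * Illustrative sketch (not part of this file): the SQE layout decoded above
 * corresponds to the classic provide-buffers call, e.g. via liburing, with
 * the buffer count carried in sqe->fd, the base address in sqe->addr, the
 * per-buffer length in sqe->len, the group in sqe->buf_group and the
 * starting BID in sqe->off. 'ring', 'base', BUF_SIZE, NBUFS and 'bgid' are
 * assumed to exist in the caller.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, base, BUF_SIZE, NBUFS, bgid, 0);
 *	io_uring_submit(&ring);
 */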
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->buf_nr_pages) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_ring *br;
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	struct page **pages;
	int nr_pages;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (!reg.ring_addr)
		return -EFAULT;
	if (reg.ring_addr & ~PAGE_MASK)
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		int ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	pages = io_pin_pages(reg.ring_addr,
			     flex_array_size(br, bufs, reg.ring_entries),
			     &nr_pages);
	if (IS_ERR(pages)) {
		kfree(free_bl);
		return PTR_ERR(pages);
	}

	br = page_address(pages[0]);
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->nr_entries = reg.ring_entries;
	bl->buf_ring = br;
	bl->mask = reg.ring_entries - 1;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
}
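/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * registration validated above, assuming liburing and 4 KiB pages. The ring
 * memory must be page-aligned and hold a power-of-two number of entries
 * below 65536; 'ring', 'entries' and 'bgid' are assumed to exist in the
 * caller.
 *
 *	struct io_uring_buf_ring *br;
 *	struct io_uring_buf_reg reg = { };
 *	int ret;
 *
 *	if (posix_memalign((void **)&br, 4096,
 *			   entries * sizeof(struct io_uring_buf)))
 *		return -ENOMEM;
 *	io_uring_buf_ring_init(br);
 *
 *	reg.ring_addr = (unsigned long)br;
 *	reg.ring_entries = entries;
 *	reg.bgid = bgid;
 *	ret = io_uring_register_buf_ring(&ring, &reg, 0);
 */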
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->buf_nr_pages)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree(bl);
	}
	return 0;
}