	__u64 addr;
	__u32 len;
	__u16 bid;
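+	/* group this buffer belongs to, so it can be recycled back into it */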
+	__u16 bgid;
};
struct io_restriction {
	return cflags;
}
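+/*
+ * Undo a provided-buffer selection: hand the buffer back to its buffer
+ * group (keyed by bgid in ctx->io_buffers) and clear the selection from
+ * the request, so the buffer is not held while the request goes async.
+ */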
+static void io_kbuf_recycle(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_buffer *head, *buf;
+
+	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+		return;
+
+	lockdep_assert_held(&ctx->uring_lock);
+
+	buf = req->kbuf;
+
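+	/* return the buffer to the list for its buffer group */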
+	head = xa_load(&ctx->io_buffers, buf->bgid);
+	if (head) {
+		list_add(&buf->list, &head->list);
+	} else {
+		int ret;
+
+		INIT_LIST_HEAD(&buf->list);
+
+		/* if we fail, just leave buffer attached */
+		ret = xa_insert(&ctx->io_buffers, buf->bgid, buf, GFP_KERNEL);
+		if (unlikely(ret < 0))
+			return;
+	}
+
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	req->kbuf = NULL;
+}
+
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
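+		/* remember the group id so the buffer can be recycled into it */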
+		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
		 * Queued up for async execution, worker will release
		 * submit reference when the iocb is actually submitted.
		 */
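+		/* punting to the async worker; give back any selected buffer */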
+		io_kbuf_recycle(req);
		io_queue_async_work(req, NULL);
		break;
+	case IO_APOLL_OK:
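+		/* poll armed; the request will select a buffer again when it runs */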
+		io_kbuf_recycle(req);
+		break;
	}
	if (linked_timeout)