enum io_uring_cmd_flags {
IO_URING_F_NONBLOCK = 1,
+ IO_URING_F_COMPLETE_DEFER = 2,
};
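IO_URING_F_COMPLETE_DEFER tells an opcode handler that the caller owns a completion batch, so the handler may record its result on the request and let the submitter flush the CQE later instead of posting one immediately. A minimal sketch of how a caller is expected to build issue_flags (names taken from this patch; the __io_queue_sqe hunk at the end of the diff is the real caller):

	/* inline submission: don't block, and defer the CQE to the submit-side batch */
	unsigned int issue_flags = IO_URING_F_NONBLOCK | IO_URING_F_COMPLETE_DEFER;

	/* io-wq or sync issue: blocking is allowed, completions are posted immediately */
	unsigned int sync_issue_flags = 0;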
struct io_mapped_ubuf {
struct fixed_rsrc_ref_node *ref_node);
static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
- struct io_comp_state *cs);
+ unsigned int issue_flags);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
}
static void io_req_complete_state(struct io_kiocb *req, long res,
- unsigned int cflags, struct io_comp_state *cs)
+ unsigned int cflags)
{
io_clean_op(req);
req->result = res;
req->flags |= REQ_F_COMPLETE_INLINE;
}
-static inline void __io_req_complete(struct io_kiocb *req, long res,
- unsigned cflags, struct io_comp_state *cs)
+static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
+ long res, unsigned cflags)
{
- if (!cs)
- io_req_complete_nostate(req, res, cflags);
+ if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+ io_req_complete_state(req, res, cflags);
else
- io_req_complete_state(req, res, cflags, cs);
+ io_req_complete_nostate(req, res, cflags);
}
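Every handler conversion below follows the same translation: a non-NULL struct io_comp_state pointer used to select the batched (state) path, and that role is now played by the IO_URING_F_COMPLETE_DEFER bit, carried in issue_flags as the second argument. Illustrative only, not verbatim kernel code:

	/* before: batching selected by passing a non-NULL completion state */
	__io_req_complete(req, res, cflags, cs);

	/* after: batching selected by a bit in issue_flags */
	__io_req_complete(req, issue_flags, res, cflags);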
static inline void io_req_complete(struct io_kiocb *req, long res)
{
- __io_req_complete(req, res, 0, NULL);
+ __io_req_complete(req, 0, res, 0);
}
static inline bool io_is_fallback_req(struct io_kiocb *req)
do {
req = list_first_entry(again, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- __io_complete_rw(req, -EAGAIN, 0, NULL);
+ __io_complete_rw(req, -EAGAIN, 0, 0);
} while (!list_empty(again));
}
}
static void io_complete_rw_common(struct kiocb *kiocb, long res,
- struct io_comp_state *cs)
+ unsigned int issue_flags)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
int cflags = 0;
req_set_fail_links(req);
if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_rw_kbuf(req);
- __io_req_complete(req, res, cflags, cs);
+ __io_req_complete(req, issue_flags, res, cflags);
}
#ifdef CONFIG_BLOCK
}
static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
- struct io_comp_state *cs)
+ unsigned int issue_flags)
{
if (!io_rw_reissue(req, res))
- io_complete_rw_common(&req->rw.kiocb, res, cs);
+ io_complete_rw_common(&req->rw.kiocb, res, issue_flags);
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- __io_complete_rw(req, res, res2, NULL);
+ __io_complete_rw(req, res, res2, 0);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
}
static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
- struct io_comp_state *cs)
+ unsigned int issue_flags)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
- __io_complete_rw(req, ret, 0, cs);
+ __io_complete_rw(req, ret, 0, issue_flags);
else
io_rw_done(kiocb, ret);
}
return -EINVAL;
}
-static int io_read(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
/* we got some bytes, but not all. retry. */
} while (ret > 0 && ret < io_size);
done:
- kiocb_done(kiocb, ret, cs);
+ kiocb_done(kiocb, ret, issue_flags);
return 0;
}
return io_rw_prep_async(req, WRITE);
}
-static int io_write(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
goto copy_iov;
done:
- kiocb_done(kiocb, ret2, cs);
+ kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
/* some cases will consume bytes even on error returns */
/*
* IORING_OP_NOP just posts a completion event, nothing else.
*/
-static int io_nop(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- __io_req_complete(req, 0, 0, cs);
+ __io_req_complete(req, issue_flags, 0, 0);
return 0;
}
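As the comment says, IORING_OP_NOP only posts a completion event. For context (user space, liburing, not part of this patch), a round trip that exercises it would look roughly like this sketch:

	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		/* queue a single NOP and submit it */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		io_uring_submit(&ring);

		/* wait for the completion; NOP reports res == 0 on success */
		io_uring_wait_cqe(&ring, &cqe);
		printf("nop completed, res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);

		io_uring_queue_exit(&ring);
		return 0;
	}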
return i;
}
-static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
/* need to hold the lock to complete IOPOLL requests */
if (ctx->flags & IORING_SETUP_IOPOLL) {
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
io_ring_submit_unlock(ctx, !force_nonblock);
} else {
io_ring_submit_unlock(ctx, !force_nonblock);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
}
return 0;
}
return i ? i : -ENOMEM;
}
-static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
/* need to hold the lock to complete IOPOLL requests */
if (ctx->flags & IORING_SETUP_IOPOLL) {
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
io_ring_submit_unlock(ctx, !force_nonblock);
} else {
io_ring_submit_unlock(ctx, !force_nonblock);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
}
return 0;
}
#endif
}
-static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_EPOLL)
struct io_epoll *ie = &req->epoll;
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
#else
return -EOPNOTSUPP;
return 0;
}
-static int io_close(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
struct files_struct *files = current->files;
struct io_close *close = &req->close;
req_set_fail_links(req);
if (file)
fput(file);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
return ret;
}
-static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
-static int io_send(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = &req->sr_msg;
struct msghdr msg;
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
return ret;
}
-static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, cflags, cs);
+ __io_req_complete(req, issue_flags, ret, cflags);
return 0;
}
-static int io_recv(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_buffer *kbuf;
struct io_sr_msg *sr = &req->sr_msg;
cflags = io_put_recv_kbuf(req);
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, cflags, cs);
+ __io_req_complete(req, issue_flags, ret, cflags);
return 0;
}
return 0;
}
-static int io_accept(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_accept *accept = &req->accept;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ret = -EINTR;
req_set_fail_links(req);
}
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
&io->address);
}
-static int io_connect(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_connect __io, *io;
unsigned file_flags;
out:
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
#else /* !CONFIG_NET */
return -EOPNOTSUPP;
}
-static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
-static int io_send(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
-static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
-static int io_recv(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
-static int io_accept(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
-static int io_connect(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
return 0;
}
-static int io_files_update(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_rsrc_update up;
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
}
}
-static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
- struct io_comp_state *cs)
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
switch (req->opcode) {
case IORING_OP_NOP:
- ret = io_nop(req, issue_flags, cs);
+ ret = io_nop(req, issue_flags);
break;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- ret = io_read(req, issue_flags, cs);
+ ret = io_read(req, issue_flags);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- ret = io_write(req, issue_flags, cs);
+ ret = io_write(req, issue_flags);
break;
case IORING_OP_FSYNC:
ret = io_fsync(req, issue_flags);
ret = io_sync_file_range(req, issue_flags);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, issue_flags, cs);
+ ret = io_sendmsg(req, issue_flags);
break;
case IORING_OP_SEND:
- ret = io_send(req, issue_flags, cs);
+ ret = io_send(req, issue_flags);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, issue_flags, cs);
+ ret = io_recvmsg(req, issue_flags);
break;
case IORING_OP_RECV:
- ret = io_recv(req, issue_flags, cs);
+ ret = io_recv(req, issue_flags);
break;
case IORING_OP_TIMEOUT:
ret = io_timeout(req, issue_flags);
ret = io_timeout_remove(req, issue_flags);
break;
case IORING_OP_ACCEPT:
- ret = io_accept(req, issue_flags, cs);
+ ret = io_accept(req, issue_flags);
break;
case IORING_OP_CONNECT:
- ret = io_connect(req, issue_flags, cs);
+ ret = io_connect(req, issue_flags);
break;
case IORING_OP_ASYNC_CANCEL:
ret = io_async_cancel(req, issue_flags);
ret = io_openat(req, issue_flags);
break;
case IORING_OP_CLOSE:
- ret = io_close(req, issue_flags, cs);
+ ret = io_close(req, issue_flags);
break;
case IORING_OP_FILES_UPDATE:
- ret = io_files_update(req, issue_flags, cs);
+ ret = io_files_update(req, issue_flags);
break;
case IORING_OP_STATX:
ret = io_statx(req, issue_flags);
ret = io_openat2(req, issue_flags);
break;
case IORING_OP_EPOLL_CTL:
- ret = io_epoll_ctl(req, issue_flags, cs);
+ ret = io_epoll_ctl(req, issue_flags);
break;
case IORING_OP_SPLICE:
ret = io_splice(req, issue_flags);
break;
case IORING_OP_PROVIDE_BUFFERS:
- ret = io_provide_buffers(req, issue_flags, cs);
+ ret = io_provide_buffers(req, issue_flags);
break;
case IORING_OP_REMOVE_BUFFERS:
- ret = io_remove_buffers(req, issue_flags, cs);
+ ret = io_remove_buffers(req, issue_flags);
break;
case IORING_OP_TEE:
ret = io_tee(req, issue_flags);
if (!ret) {
do {
- ret = io_issue_sqe(req, 0, NULL);
+ ret = io_issue_sqe(req, 0);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
{
struct io_kiocb *linked_timeout;
const struct cred *old_creds = NULL;
- int ret;
+ int ret, issue_flags = IO_URING_F_NONBLOCK;
+ if (cs)
+ issue_flags |= IO_URING_F_COMPLETE_DEFER;
again:
linked_timeout = io_prep_linked_timeout(req);
old_creds = override_creds(req->work.identity->creds);
}
- ret = io_issue_sqe(req, IO_URING_F_NONBLOCK, cs);
+ ret = io_issue_sqe(req, issue_flags);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file