bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
-static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags);
static void io_dismantle_req(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
req_set_fail(req);
- req->cqe.res = res;
+ io_req_set_res(req, res, 0);
}
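Throughout this patch, result propagation moves from completion-function arguments into the request itself via io_req_set_res(). In the kernel this helper is a plain two-field store into the CQE embedded in the request; below is a compilable standalone model of it (struct layout illustrative, not the kernel's exact definition):

```c
#include <stdint.h>

/* Illustrative stand-ins for the io_uring internals these hunks touch;
 * the real definitions in fs/io_uring.c are much larger. */
struct io_cqe {
	int32_t  res;   /* completion result (kernel: s32) */
	uint32_t flags; /* completion flags (kernel: u32)  */
};

struct io_kiocb {
	struct io_cqe cqe;
};

/* Model of the kernel helper: stash result and flags in the request's
 * embedded CQE now; a later completion path posts the actual CQE. */
static inline void io_req_set_res(struct io_kiocb *req, int32_t res,
				  uint32_t cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}
```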
static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
}
}
-static void __io_req_complete_post(struct io_kiocb *req, s32 res,
- u32 cflags)
+static void __io_req_complete_post(struct io_kiocb *req)
{
- if (!(req->flags & REQ_F_CQE_SKIP)) {
- req->cqe.res = res;
- req->cqe.flags = cflags;
+ if (!(req->flags & REQ_F_CQE_SKIP))
__io_fill_cqe_req(req->ctx, req);
- }
__io_req_complete_put(req);
}
-static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
+static void io_req_complete_post(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
spin_lock(&ctx->completion_lock);
- __io_req_complete_post(req, res, cflags);
+ __io_req_complete_post(req);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
}
-static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
- s32 res, u32 cflags)
+static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
{
- if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
- io_req_set_res(req, res, cflags);
+ if (issue_flags & IO_URING_F_COMPLETE_DEFER)
req->flags |= REQ_F_COMPLETE_INLINE;
- } else {
- io_req_complete_post(req, res, cflags);
- }
-}
-
-static inline void io_req_complete(struct io_kiocb *req, s32 res)
-{
- if (res < 0)
- req_set_fail(req);
- __io_req_complete(req, 0, res, 0);
+ else
+ io_req_complete_post(req);
}
static void io_req_complete_failed(struct io_kiocb *req, s32 res)
{
req_set_fail(req);
- io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ io_req_complete_post(req);
}
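This hunk shows the mechanical rule applied across the rest of the patch: every completion call that used to carry (res, cflags) as arguments becomes a two-step sequence. A sketch of the rewrite rule (a fragment, not standalone; `res`/`cflags` stand in for whatever a call site computes):

```c
/* old shape: the result travels through the completion call */
io_req_complete_post(req, res, cflags);

/* new shape: the result is stashed first, and the completion helpers
 * then read it back out of req->cqe */
io_req_set_res(req, res, cflags);
io_req_complete_post(req);
```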
/*
link->flags |= REQ_F_CQE_SKIP;
else
link->flags &= ~REQ_F_CQE_SKIP;
- __io_req_complete_post(link, res, 0);
+ io_req_set_res(link, res, 0);
+ __io_req_complete_post(link);
link = nxt;
}
}
if (unlikely(!*uring_locked))
spin_lock(&(*ctx)->completion_lock);
}
- if (likely(*uring_locked))
+ if (likely(*uring_locked)) {
req->io_task_work.func(req, uring_locked);
- else
- __io_req_complete_post(req, req->cqe.res,
- io_put_kbuf_comp(req));
+ } else {
+ req->cqe.flags = io_put_kbuf_comp(req);
+ __io_req_complete_post(req);
+ }
node = next;
} while (node);
static void io_req_tw_post(struct io_kiocb *req, bool *locked)
{
- io_req_complete_post(req, req->cqe.res, req->cqe.flags);
+ io_req_complete_post(req);
}
static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
{
- req->cqe.res = res;
- req->cqe.flags = cflags;
+ io_req_set_res(req, res, cflags);
req->io_task_work.func = io_req_tw_post;
io_req_task_work_add(req);
}
static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
- req->cqe.res = ret;
+ io_req_set_res(req, ret, 0);
req->io_task_work.func = io_req_task_cancel;
io_req_task_work_add(req);
}
static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
{
- int res = req->cqe.res;
-
if (*locked) {
- io_req_set_res(req, res, io_put_kbuf(req, 0));
+ req->cqe.flags |= io_put_kbuf(req, 0);
req->flags |= REQ_F_COMPLETE_INLINE;
io_req_add_compl_list(req);
} else {
- io_req_complete_post(req, res,
- io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ req->cqe.flags |= io_put_kbuf(req, IO_URING_F_UNLOCKED);
+ io_req_complete_post(req);
}
}
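One subtlety in io_req_task_complete() above: because the result and flags are now stashed before the task-work runs, the buffer-accounting bits from io_put_kbuf() must be OR'ed into req->cqe.flags rather than assigned, so they do not clobber anything stashed earlier. A commented restatement of the locked branch, as I read it:

```c
if (*locked) {
	/* fold the kbuf bits into whatever completion flags were
	 * stashed earlier; don't overwrite them */
	req->cqe.flags |= io_put_kbuf(req, 0);
	req->flags |= REQ_F_COMPLETE_INLINE;
	io_req_add_compl_list(req);
}
```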
{
if (__io_complete_rw_common(req, res))
return;
- __io_req_complete(req, issue_flags, req->cqe.res,
- io_put_kbuf(req, issue_flags));
+ io_req_set_res(req, req->cqe.res, io_put_kbuf(req, issue_flags));
+ __io_req_complete(req, issue_flags);
}
static void io_complete_rw(struct kiocb *kiocb, long res)
if (__io_complete_rw_common(req, res))
return;
- req->cqe.res = res;
+ io_req_set_res(req, res, 0);
req->io_task_work.func = io_req_task_complete;
io_req_task_prio_work_add(req);
}
*/
ret = io_iter_do_read(rw, &s->iter);
if (ret == -EIOCBQUEUED)
- return 0;
+ return IOU_ISSUE_SKIP_COMPLETE;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
iov_iter_restore(&s->iter, &s->iter_state);
/* it's faster to check here than delegate to kfree */
if (iovec)
kfree(iovec);
- return 0;
+ return IOU_ISSUE_SKIP_COMPLETE;
}
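The `return 0` → `return IOU_OK` / `return IOU_ISSUE_SKIP_COMPLETE` conversions rely on the issue-status constants this series introduces. As defined in the kernel's io_uring internals, IOU_ISSUE_SKIP_COMPLETE deliberately aliases -EIOCBQUEUED, so lower layers that already return -EIOCBQUEUED compose with it. A standalone model (EIOCBQUEUED is a kernel-internal errno; its value is reproduced here so the snippet compiles on its own):

```c
/* Kernel-internal errno from include/linux/errno.h, reproduced so this
 * model is self-contained. */
#define EIOCBQUEUED	529

enum {
	/* handler is done; the core posts the CQE from req->cqe */
	IOU_OK			= 0,
	/* handler owns completion (already posted, or will post later);
	 * the core must not post a CQE */
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};
```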
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
goto copy_iov;
done:
kiocb_done(req, ret2, issue_flags);
+ ret = IOU_ISSUE_SKIP_COMPLETE;
} else {
copy_iov:
iov_iter_restore(&s->iter, &s->iter_state);
ren->newpath, ren->flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static void io_renameat_cleanup(struct io_kiocb *req)
req->flags &= ~REQ_F_NEED_CLEANUP;
io_xattr_cleanup(req);
- io_req_complete(req, ret);
+ io_req_set_res(req, ret, 0);
}
static int __io_getxattr_prep(struct io_kiocb *req,
&ix->ctx);
io_xattr_finish(req, ret);
- return 0;
+ return IOU_OK;
}
static int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
}
io_xattr_finish(req, ret);
- return 0;
+ return IOU_OK;
}
static int __io_setxattr_prep(struct io_kiocb *req,
ret = __io_setxattr(req, issue_flags, &req->file->f_path);
io_xattr_finish(req, ret);
-
- return 0;
+ return IOU_OK;
}
static int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
}
io_xattr_finish(req, ret);
- return 0;
+ return IOU_OK;
}
static int io_unlinkat_prep(struct io_kiocb *req,
ret = do_unlinkat(un->dfd, un->filename);
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static void io_unlinkat_cleanup(struct io_kiocb *req)
ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static void io_mkdirat_cleanup(struct io_kiocb *req)
ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_linkat_prep(struct io_kiocb *req,
lnk->newpath, lnk->flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static void io_link_cleanup(struct io_kiocb *req)
io_req_set_res(req, ret, 0);
if (req->ctx->flags & IORING_SETUP_CQE32)
io_req_set_cqe32_extra(req, res2, 0);
- io_req_complete(req, ret);
+ __io_req_complete(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
return -EAGAIN;
}
- if (ret != -EIOCBQUEUED)
+ if (ret != -EIOCBQUEUED) {
io_uring_cmd_done(ioucmd, ret, 0);
- return 0;
+ return IOU_OK;
+ }
+
+ return IOU_ISSUE_SKIP_COMPLETE;
}
static int __io_splice_prep(struct io_kiocb *req,
done:
if (ret != sp->len)
req_set_fail(req);
- __io_req_complete(req, 0, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
done:
if (ret != sp->len)
req_set_fail(req);
- __io_req_complete(req, 0, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
*/
static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
- __io_req_complete(req, issue_flags, 0, 0);
- return 0;
+ io_req_set_res(req, 0, 0);
+ return IOU_OK;
}
static int io_msg_ring_prep(struct io_kiocb *req,
done:
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
+ io_req_set_res(req, ret, 0);
/* put file to avoid an attempt to IOPOLL the req */
io_put_file(req->file);
req->file = NULL;
- return 0;
+ return IOU_OK;
}
static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
sync->flags & IORING_FSYNC_DATASYNC);
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_fallocate_prep(struct io_kiocb *req,
ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
if (ret >= 0)
fsnotify_modify(req->file);
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
req_set_fail(req);
/* complete before unlock, IOPOLL may need the lock */
- __io_req_complete(req, issue_flags, ret, 0);
+ io_req_set_res(req, ret, 0);
+ __io_req_complete(req, issue_flags);
io_ring_submit_unlock(ctx, issue_flags);
- return 0;
+ return IOU_ISSUE_SKIP_COMPLETE;
}
static int io_provide_buffers_prep(struct io_kiocb *req,
if (ret < 0)
req_set_fail(req);
/* complete before unlock, IOPOLL may need the lock */
- __io_req_complete(req, issue_flags, ret, 0);
+ io_req_set_res(req, ret, 0);
+ __io_req_complete(req, issue_flags);
io_ring_submit_unlock(ctx, issue_flags);
- return 0;
+ return IOU_ISSUE_SKIP_COMPLETE;
}
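The two buffer hunks above look different from the plain IOU_OK conversions for a reason: they must post the completion while still holding the ring lock (the in-code comment notes IOPOLL may need it), so they cannot hand the result back to the core. The annotated shape of the pattern, as I read it (fragment, not standalone):

```c
io_req_set_res(req, ret, 0);
__io_req_complete(req, issue_flags);	/* CQE posted (or deferred) here,
					 * still under the ring lock */
io_ring_submit_unlock(ctx, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;		/* tell the core: already done */
```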
static int io_epoll_ctl_prep(struct io_kiocb *req,
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
#else
return -EOPNOTSUPP;
#endif
return -EAGAIN;
ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
#else
return -EOPNOTSUPP;
#endif
ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EAGAIN;
ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer);
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static void io_statx_cleanup(struct io_kiocb *req)
err:
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EAGAIN;
ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
#if defined(CONFIG_NET)
return -ENOTSOCK;
ret = __sys_shutdown_sock(sock, shutdown->how);
- io_req_complete(req, ret);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static bool io_net_retry(struct socket *sock, int flags)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_send(struct io_kiocb *req, unsigned int issue_flags)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
cflags = io_put_kbuf(req, issue_flags);
if (kmsg->msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- __io_req_complete(req, issue_flags, ret, cflags);
- return 0;
+ io_req_set_res(req, ret, cflags);
+ return IOU_OK;
}
static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
cflags = io_put_kbuf(req, issue_flags);
if (msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- __io_req_complete(req, issue_flags, ret, cflags);
- return 0;
+ io_req_set_res(req, ret, cflags);
+ return IOU_OK;
}
static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
*/
if ((req->flags & IO_APOLL_MULTI_POLLED) ==
IO_APOLL_MULTI_POLLED)
- ret = 0;
+ ret = IOU_ISSUE_SKIP_COMPLETE;
return ret;
}
if (ret == -ERESTARTSYS)
}
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
if (ret >= 0) {
bool filled;
ret = io_fixed_fd_install(req, issue_flags, file,
sock->file_slot);
}
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_connect_prep_async(struct io_kiocb *req)
out:
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
#else /* !CONFIG_NET */
#define IO_NETOP_FN(op) \
io_poll_remove_entries(req);
spin_lock(&ctx->completion_lock);
hash_del(&req->hash_node);
- __io_req_complete_post(req, req->cqe.res, 0);
+ req->cqe.flags = 0;
+ __io_req_complete_post(req);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
static void __io_poll_execute(struct io_kiocb *req, int mask,
__poll_t __maybe_unused events)
{
- req->cqe.res = mask;
+ io_req_set_res(req, mask, 0);
/*
* This is useful for poll that is armed on behalf of another
* request, and where the wakeup path could be on a different
ipt.pt._qproc = io_poll_queue_proc;
ret = __io_arm_poll_handler(req, poll, &ipt, poll->events);
- if (!ret && ipt.error)
+ if (ret) {
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
+ }
+ if (ipt.error) {
req_set_fail(req);
- ret = ret ?: ipt.error;
- if (ret)
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ return ipt.error;
+ }
+
+ return IOU_ISSUE_SKIP_COMPLETE;
}
static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
ret2 = io_poll_add(preq, issue_flags);
/* successfully updated, don't complete poll request */
- if (!ret2)
+ if (!ret2 || ret2 == -EIOCBQUEUED)
goto out;
}
req_set_fail(preq);
- preq->cqe.res = -ECANCELED;
+ io_req_set_res(preq, -ECANCELED, 0);
locked = !(issue_flags & IO_URING_F_UNLOCKED);
io_req_task_complete(preq, &locked);
out:
- if (ret < 0)
+ if (ret < 0) {
req_set_fail(req);
+ return ret;
+ }
/* complete update request, we're done with it */
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
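The new `ret2 == -EIOCBQUEUED` check in the poll-update path works because io_poll_add() now returns IOU_ISSUE_SKIP_COMPLETE for a successfully armed poll, and that constant is -EIOCBQUEUED by definition. A compile-time restatement (definitions repeated from the model after the io_read hunk so this compiles standalone):

```c
#define EIOCBQUEUED 529			/* kernel-internal errno */
enum { IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED };

/* "!ret2 || ret2 == -EIOCBQUEUED" treats an armed poll as queued
 * precisely because the two spellings share one value: */
_Static_assert(IOU_ISSUE_SKIP_COMPLETE == -EIOCBQUEUED,
	       "an armed poll must read as queued to the updater");
```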
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
req_set_fail(req);
- req->cqe.res = -ETIME;
+ io_req_set_res(req, -ETIME, 0);
req->io_task_work.func = io_req_task_complete;
io_req_task_work_add(req);
return HRTIMER_NORESTART;
if (ret < 0)
req_set_fail(req);
- io_req_complete_post(req, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int __io_timeout_prep(struct io_kiocb *req,
data->timer.function = io_timeout_fn;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
spin_unlock_irq(&ctx->timeout_lock);
- return 0;
+ return IOU_ISSUE_SKIP_COMPLETE;
}
static bool io_cancel_cb(struct io_wq_work *work, void *data)
done:
if (ret < 0)
req_set_fail(req);
- io_req_complete_post(req, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_files_update_prep(struct io_kiocb *req,
if (ret < 0)
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
}
static int io_req_prep_async(struct io_kiocb *req)
if (creds)
revert_creds(creds);
- if (ret)
+
+ if (ret == IOU_OK)
+ __io_req_complete(req, issue_flags);
+ else if (ret != IOU_ISSUE_SKIP_COMPLETE)
return ret;
+
/* If the op doesn't have a file, we're not polling for it */
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
io_iopoll_req_issued(req, issue_flags);
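The io_issue_sqe() hunk above is the core's half of the contract: complete on IOU_OK, stay away on IOU_ISSUE_SKIP_COMPLETE, propagate anything else as an error. A self-contained toy that exercises the convention (names like `toy_issue`/`toy_nop` are hypothetical; the real flow is more involved):

```c
#include <stdio.h>

#define EIOCBQUEUED 529
enum { IOU_OK = 0, IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED };

struct toy_req { int res; int completed; };

/* Models a converted handler such as io_nop(): stash the result,
 * let the core post the completion. */
static int toy_nop(struct toy_req *req)
{
	req->res = 0;
	return IOU_OK;
}

/* Models the io_issue_sqe() change: complete on IOU_OK, do nothing on
 * IOU_ISSUE_SKIP_COMPLETE, propagate real errors. */
static int toy_issue(struct toy_req *req, int (*handler)(struct toy_req *))
{
	int ret = handler(req);

	if (ret == IOU_OK)
		req->completed = 1;		/* core posts the CQE */
	else if (ret != IOU_ISSUE_SKIP_COMPLETE)
		return ret;			/* hard error */
	return 0;
}

int main(void)
{
	struct toy_req req = { .res = -1, .completed = 0 };

	toy_issue(&req, toy_nop);
	printf("res=%d completed=%d\n", req.res, req.completed);
	return 0;
}
```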
} while (1);
/* avoid locking problems by failing it from a clean context */
- if (ret)
+ if (ret < 0)
io_req_task_queue_fail(req, ret);
}
ret = io_try_cancel(req, &cd);
}
- io_req_complete_post(req, ret ?: -ETIME, 0);
+ io_req_set_res(req, ret ?: -ETIME, 0);
+ io_req_complete_post(req);
io_put_req(prev);
} else {
- io_req_complete_post(req, -ETIME, 0);
+ io_req_set_res(req, -ETIME, 0);
+ io_req_complete_post(req);
}
}