Merge tag 'for-6.1/io_uring-2022-10-03' of git://git.kernel.dk/linux
author		Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 7 Oct 2022 15:52:43 +0000 (08:52 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 7 Oct 2022 15:52:43 +0000 (08:52 -0700)
Pull io_uring updates from Jens Axboe:

 - Add support for more directly managed task_work running.

   This is beneficial for real world applications that end up issuing
   lots of system calls as part of handling work. Normal task_work will
   always execute as we transition in and out of the kernel, even for
   "unrelated" system calls. It's more efficient to defer the handling
   of io_uring's deferred work until the application wants it to be run,
   generally in batches (see the setup sketch after this list).

   As part of ongoing work to write an io_uring network backend for
   Thrift, this has been shown to greatly improve performance. (Dylan)

 - Add IOPOLL support for passthrough (Kanchan)

 - Improvements and fixes to the send zero-copy support (Pavel)

 - Partial IO handling fixes (Pavel)

 - CQE ordering fixes around CQ ring overflow (Pavel)

 - Support sendto() for non-zc as well (Pavel)

 - Support sendmsg for zerocopy (Pavel)

 - Networking iov_iter fix (Stefan)

 - Misc fixes and cleanups (Pavel, me)
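
A minimal setup sketch for the deferred task_work mode described above,
assuming liburing 2.3+ headers (or the uapi flags added further down in the
diff) and a 6.1+ kernel; older kernels reject the unknown flags with -EINVAL:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p = { 0 };
	struct io_uring ring;
	int ret;

	/*
	 * DEFER_TASKRUN requires SINGLE_ISSUER: only the task that created
	 * the ring may submit, and the ring's deferred work is run when that
	 * task waits for completions rather than on every kernel transition.
	 */
	p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/*
	 * ... prep and submit requests as usual; the deferred task_work is
	 * then processed in batches when the submitter waits, e.g.:
	 *
	 *	io_uring_submit_and_wait(&ring, 1);
	 */

	io_uring_queue_exit(&ring);
	return 0;
}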

* tag 'for-6.1/io_uring-2022-10-03' of git://git.kernel.dk/linux: (56 commits)
  io_uring/net: fix notif cqe reordering
  io_uring/net: don't update msg_name if not provided
  io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
  io_uring/rw: defer fsnotify calls to task context
  io_uring/net: fix fast_iov assignment in io_setup_async_msg()
  io_uring/net: fix non-zc send with address
  io_uring/net: don't skip notifs for failed requests
  io_uring/rw: don't lose short results on io_setup_async_rw()
  io_uring/rw: fix unexpected link breakage
  io_uring/net: fix cleanup double free free_iov init
  io_uring: fix CQE reordering
  io_uring/net: fix UAF in io_sendrecv_fail()
  selftest/net: adjust io_uring sendzc notif handling
  io_uring: ensure local task_work marks task as running
  io_uring/net: zerocopy sendmsg
  io_uring/net: combine fail handlers
  io_uring/net: rename io_sendzc()
  io_uring/net: support non-zerocopy sendto
  io_uring/net: refactor io_setup_async_addr
  io_uring/net: don't lose partial send_zc on fail
  ...

30 files changed:
block/blk-mq.c
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
fs/eventfd.c
include/linux/blk-mq.h
include/linux/eventfd.h
include/linux/fs.h
include/linux/io_uring.h
include/linux/io_uring_types.h
include/linux/sched.h
include/trace/events/io_uring.h
include/uapi/linux/io_uring.h
io_uring/cancel.c
io_uring/fdinfo.c
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/kbuf.h
io_uring/net.c
io_uring/net.h
io_uring/opdef.c
io_uring/opdef.h
io_uring/rsrc.c
io_uring/rw.c
io_uring/rw.h
io_uring/timeout.c
io_uring/timeout.h
io_uring/uring_cmd.c
tools/testing/selftests/net/io_uring_zerocopy_tx.c

index c96c8c4..0df2018 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1233,7 +1233,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
        complete(&wait->done);
 }
 
-static bool blk_rq_is_poll(struct request *rq)
+bool blk_rq_is_poll(struct request *rq)
 {
        if (!rq->mq_hctx)
                return false;
@@ -1243,6 +1243,7 @@ static bool blk_rq_is_poll(struct request *rq)
                return false;
        return true;
 }
+EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
index 8d5a7ae..45ef8e8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3976,6 +3976,7 @@ static const struct file_operations nvme_ns_chr_fops = {
        .unlocked_ioctl = nvme_ns_chr_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .uring_cmd      = nvme_ns_chr_uring_cmd,
+       .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_cdev(struct nvme_ns *ns)
index 27614be..548aca8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -391,11 +391,19 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        /* extract bio before reusing the same field for request */
        struct bio *bio = pdu->bio;
+       void *cookie = READ_ONCE(ioucmd->cookie);
 
        pdu->req = req;
        req->bio = bio;
-       /* this takes care of moving rest of completion-work to task context */
-       io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+       /*
+        * For iopoll, complete it directly.
+        * Otherwise, move the completion to task work.
+        */
+       if (cookie != NULL && blk_rq_is_poll(req))
+               nvme_uring_task_cb(ioucmd);
+       else
+               io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -445,7 +453,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                rq_flags = REQ_NOWAIT;
                blk_flags = BLK_MQ_REQ_NOWAIT;
        }
+       if (issue_flags & IO_URING_F_IOPOLL)
+               rq_flags |= REQ_POLLED;
 
+retry:
        req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
                        d.data_len, nvme_to_user_ptr(d.metadata),
                        d.metadata_len, 0, &meta, d.timeout_ms ?
@@ -456,6 +467,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        req->end_io = nvme_uring_cmd_end_io;
        req->end_io_data = ioucmd;
 
+       if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+               if (unlikely(!req->bio)) {
+                       /* we can't poll this, so alloc regular req instead */
+                       blk_mq_free_request(req);
+                       rq_flags &= ~REQ_POLLED;
+                       goto retry;
+               } else {
+                       WRITE_ONCE(ioucmd->cookie, req->bio);
+                       req->bio->bi_opf |= REQ_POLLED;
+               }
+       }
        /* to free bio on completion, as req->bio will be null at that time */
        pdu->bio = req->bio;
        pdu->meta = meta;
@@ -559,9 +581,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
 {
-       /* IOPOLL not supported yet */
-       if (issue_flags & IO_URING_F_IOPOLL)
-               return -EOPNOTSUPP;
 
        /* NVMe passthrough requires big SQE/CQE support */
        if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -604,6 +623,25 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
        return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
 }
 
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+                                struct io_comp_batch *iob,
+                                unsigned int poll_flags)
+{
+       struct bio *bio;
+       int ret = 0;
+       struct nvme_ns *ns;
+       struct request_queue *q;
+
+       rcu_read_lock();
+       bio = READ_ONCE(ioucmd->cookie);
+       ns = container_of(file_inode(ioucmd->file)->i_cdev,
+                       struct nvme_ns, cdev);
+       q = ns->queue;
+       if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+               ret = bio_poll(bio, iob, poll_flags);
+       rcu_read_unlock();
+       return ret;
+}
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -685,6 +723,31 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
 }
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+                                     struct io_comp_batch *iob,
+                                     unsigned int poll_flags)
+{
+       struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+       struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+       int srcu_idx = srcu_read_lock(&head->srcu);
+       struct nvme_ns *ns = nvme_find_path(head);
+       struct bio *bio;
+       int ret = 0;
+       struct request_queue *q;
+
+       if (ns) {
+               rcu_read_lock();
+               bio = READ_ONCE(ioucmd->cookie);
+               q = ns->queue;
+               if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+                               && bio->bi_bdev)
+                       ret = bio_poll(bio, iob, poll_flags);
+               rcu_read_unlock();
+       }
+       srcu_read_unlock(&head->srcu, srcu_idx);
+       return ret;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
@@ -692,6 +755,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
        struct nvme_ctrl *ctrl = ioucmd->file->private_data;
        int ret;
 
+       /* IOPOLL not supported yet */
+       if (issue_flags & IO_URING_F_IOPOLL)
+               return -EOPNOTSUPP;
+
        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;
index 6ef497c..00f2f81 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
        .unlocked_ioctl = nvme_ns_head_chr_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .uring_cmd      = nvme_ns_head_chr_uring_cmd,
+       .uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
index 1bdf714..216acbe 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -821,6 +821,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg);
 long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+               struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+               struct io_comp_batch *iob, unsigned int poll_flags);
 int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
                unsigned int issue_flags);
 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
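
For reference, a hedged userspace sketch of the polled passthrough path the
NVMe hunks above enable: one read issued through the NVMe generic char device
with IORING_OP_URING_CMD on an IOPOLL ring. The device path (/dev/ng0n1), the
4096-byte block size, LBA 0 and the minimal error handling are assumptions,
and passthrough typically also requires CAP_SYS_ADMIN:

#include <liburing.h>
#include <linux/nvme_ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct io_uring_params p = { 0 };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	void *buf;
	int fd;

	/* Passthrough needs big SQEs/CQEs; IOPOLL selects completion polling. */
	p.flags = IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_IOPOLL;
	if (io_uring_queue_init_params(4, &ring, &p) < 0)
		return 1;

	fd = open("/dev/ng0n1", O_RDONLY);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128: each slot is 128 bytes */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	/* The NVMe command sits in the big-SQE command area. */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;			/* nvme_cmd_read */
	cmd->addr = (__u64)(uintptr_t)buf;
	cmd->data_len = 4096;
	cmd->cdw10 = 0;				/* starting LBA (low 32 bits) */
	cmd->cdw12 = 0;				/* number of LBAs - 1 */

	io_uring_submit(&ring);
	/* With IOPOLL the wait spins via ->uring_cmd_iopoll() instead of IRQs. */
	if (io_uring_wait_cqe(&ring, &cqe) == 0)
		io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}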
index 3627dd7..c0ffee9 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -69,17 +69,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
         * it returns false, the eventfd_signal() call should be deferred to a
         * safe context.
         */
-       if (WARN_ON_ONCE(current->in_eventfd_signal))
+       if (WARN_ON_ONCE(current->in_eventfd))
                return 0;
 
        spin_lock_irqsave(&ctx->wqh.lock, flags);
-       current->in_eventfd_signal = 1;
+       current->in_eventfd = 1;
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-       current->in_eventfd_signal = 0;
+       current->in_eventfd = 0;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return n;
@@ -253,8 +253,10 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
                __set_current_state(TASK_RUNNING);
        }
        eventfd_ctx_do_read(ctx, &ucnt);
+       current->in_eventfd = 1;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
+       current->in_eventfd = 0;
        spin_unlock_irq(&ctx->wqh.lock);
        if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
                return -EFAULT;
@@ -301,8 +303,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
        }
        if (likely(res > 0)) {
                ctx->count += ucnt;
+               current->in_eventfd = 1;
                if (waitqueue_active(&ctx->wqh))
                        wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+               current->in_eventfd = 0;
        }
        spin_unlock_irq(&ctx->wqh.lock);
 
index 92294a5..de384f5 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -980,6 +980,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
 
 struct req_iterator {
        struct bvec_iter iter;
index 305d5f1..30eb30d 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -46,7 +46,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
 static inline bool eventfd_signal_allowed(void)
 {
-       return !current->in_eventfd_signal;
+       return !current->in_eventfd;
 }
 
 #else /* CONFIG_EVENTFD */
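
These eventfd changes serialize against io_uring signalling a registered
eventfd when CQEs are posted. For context, a hedged sketch of that
registration from userspace, assuming liburing (in practice the eventfd would
usually feed an epoll loop rather than a blocking read):

#include <liburing.h>
#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>

int watch_ring(struct io_uring *ring)
{
	uint64_t count;
	int efd;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0 || io_uring_register_eventfd(ring, efd) < 0)
		return -1;

	/*
	 * The eventfd counter ticks whenever new CQEs are posted to this
	 * ring; a blocking read is enough to show the signalling.
	 */
	if (read(efd, &count, sizeof(count)) == sizeof(count)) {
		/* CQEs are ready; reap with io_uring_peek_cqe() etc. */
	}

	io_uring_unregister_eventfd(ring);
	close(efd);
	return 0;
}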
index 7098f08..619d683 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2133,6 +2133,8 @@ struct file_operations {
                                   loff_t len, unsigned int remap_flags);
        int (*fadvise)(struct file *, loff_t, loff_t, int);
        int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+       int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+                               unsigned int poll_flags);
 } __randomize_layout;
 
 struct inode_operations {
index 4a2f6cc..58676c0 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -20,8 +20,12 @@ enum io_uring_cmd_flags {
 struct io_uring_cmd {
        struct file     *file;
        const void      *cmd;
-       /* callback to defer completions to task context */
-       void (*task_work_cb)(struct io_uring_cmd *cmd);
+       union {
+               /* callback to defer completions to task context */
+               void (*task_work_cb)(struct io_uring_cmd *cmd);
+               /* used for polled completion */
+               void *cookie;
+       };
        u32             cmd_op;
        u32             pad;
        u8              pdu[32]; /* available inline for free use */
index 677a25d..aa4d90a 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -184,6 +184,8 @@ struct io_ev_fd {
        struct eventfd_ctx      *cq_ev_fd;
        unsigned int            eventfd_async: 1;
        struct rcu_head         rcu;
+       atomic_t                refs;
+       atomic_t                ops;
 };
 
 struct io_alloc_cache {
@@ -301,6 +303,8 @@ struct io_ring_ctx {
                struct io_hash_table    cancel_table;
                bool                    poll_multi_queue;
 
+               struct llist_head       work_llist;
+
                struct list_head        io_buffers_comp;
        } ____cacheline_aligned_in_smp;
 
index e7b2f8a..8d82d6d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -936,7 +936,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_EVENTFD
        /* Recursion prevention for eventfd_signal() */
-       unsigned                        in_eventfd_signal:1;
+       unsigned                        in_eventfd:1;
 #endif
 #ifdef CONFIG_IOMMU_SVA
        unsigned                        pasid_activated:1;
index c5b21ff..936fd41 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -655,6 +655,35 @@ TRACE_EVENT(io_uring_short_write,
                          __entry->wanted, __entry->got)
 );
 
+/*
+ * io_uring_local_work_run - ran ring local task work
+ *
+ * @ctx:               pointer to an io_ring_ctx
+ * @count:             how many functions it ran
+ * @loops:             how many loops it ran
+ *
+ */
+TRACE_EVENT(io_uring_local_work_run,
+
+       TP_PROTO(void *ctx, int count, unsigned int loops),
+
+       TP_ARGS(ctx, count, loops),
+
+       TP_STRUCT__entry (
+               __field(void *,         ctx     )
+               __field(int,            count   )
+               __field(unsigned int,   loops   )
+       ),
+
+       TP_fast_assign(
+               __entry->ctx            = ctx;
+               __entry->count          = count;
+               __entry->loops          = loops;
+       ),
+
+       TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
+);
+
 #endif /* _TRACE_IO_URING_H */
 
 /* This part must be outside protection */
index 6b83177..92f29d9 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -157,6 +157,13 @@ enum {
  */
 #define IORING_SETUP_SINGLE_ISSUER     (1U << 12)
 
+/*
+ * Defer running task work to get events.
+ * Rather than running bits of task work whenever the task transitions
+ * try to do it just before it is needed.
+ */
+#define IORING_SETUP_DEFER_TASKRUN     (1U << 13)
+
 enum io_uring_op {
        IORING_OP_NOP,
        IORING_OP_READV,
@@ -206,6 +213,7 @@ enum io_uring_op {
        IORING_OP_SOCKET,
        IORING_OP_URING_CMD,
        IORING_OP_SEND_ZC,
+       IORING_OP_SENDMSG_ZC,
 
        /* this goes last, obviously */
        IORING_OP_LAST,
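
A hedged userspace sketch of the new zerocopy sendmsg opcode, assuming
liburing's io_uring_prep_sendmsg_zc() helper (2.3+) and a connected socket;
the payload buffers must stay untouched until the notification CQE arrives:

#include <liburing.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

int send_zc_msg(struct io_uring *ring, int sockfd,
		struct iovec *iov, int iovcnt)
{
	struct io_uring_sqe *sqe;
	struct msghdr msg;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = iov;
	msg.msg_iovlen = iovcnt;

	/*
	 * Zerocopy sends complete in two steps: the usual CQE (with
	 * IORING_CQE_F_MORE set), then a notification CQE carrying
	 * IORING_CQE_F_NOTIF once the kernel is done with the pages.
	 * The iovec's buffers must stay stable until that notification.
	 */
	io_uring_prep_sendmsg_zc(sqe, sockfd, &msg, 0);
	io_uring_sqe_set_data64(sqe, 1);

	return io_uring_submit(ring);
}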
index 5fc5d3e..2291a53 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -292,7 +292,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
                        break;
 
                mutex_unlock(&ctx->uring_lock);
-               ret = io_run_task_work_sig();
+               ret = io_run_task_work_sig(ctx);
                if (ret < 0) {
                        mutex_lock(&ctx->uring_lock);
                        break;
index b29e2d0..4eae088 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -60,13 +60,15 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
        unsigned int cq_head = READ_ONCE(r->cq.head);
        unsigned int cq_tail = READ_ONCE(r->cq.tail);
        unsigned int cq_shift = 0;
+       unsigned int sq_shift = 0;
        unsigned int sq_entries, cq_entries;
        bool has_lock;
-       bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
        unsigned int i;
 
-       if (is_cqe32)
+       if (ctx->flags & IORING_SETUP_CQE32)
                cq_shift = 1;
+       if (ctx->flags & IORING_SETUP_SQE128)
+               sq_shift = 1;
 
        /*
         * we may get imprecise sqe and cqe info if uring is actively running
@@ -82,19 +84,36 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
        seq_printf(m, "CqHead:\t%u\n", cq_head);
        seq_printf(m, "CqTail:\t%u\n", cq_tail);
        seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
-       seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
+       seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
        sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
        for (i = 0; i < sq_entries; i++) {
                unsigned int entry = i + sq_head;
-               unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
                struct io_uring_sqe *sqe;
+               unsigned int sq_idx;
 
+               sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
                if (sq_idx > sq_mask)
                        continue;
-               sqe = &ctx->sq_sqes[sq_idx];
-               seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
-                          sq_idx, sqe->opcode, sqe->fd, sqe->flags,
-                          sqe->user_data);
+               sqe = &ctx->sq_sqes[sq_idx << 1];
+               seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
+                             "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
+                             "user_data:%llu",
+                          sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
+                          sqe->flags, (unsigned long long) sqe->off,
+                          (unsigned long long) sqe->addr, sqe->rw_flags,
+                          sqe->buf_index, sqe->user_data);
+               if (sq_shift) {
+                       u64 *sqeb = (void *) (sqe + 1);
+                       int size = sizeof(struct io_uring_sqe) / sizeof(u64);
+                       int j;
+
+                       for (j = 0; j < size; j++) {
+                               seq_printf(m, ", e%d:0x%llx", j,
+                                               (unsigned long long) *sqeb);
+                               sqeb++;
+                       }
+               }
+               seq_printf(m, "\n");
        }
        seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
        cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
@@ -102,16 +121,13 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                unsigned int entry = i + cq_head;
                struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
 
-               if (!is_cqe32) {
-                       seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
+               seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
                           entry & cq_mask, cqe->user_data, cqe->res,
                           cqe->flags);
-               } else {
-                       seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
-                               "extra1:%llu, extra2:%llu\n",
-                               entry & cq_mask, cqe->user_data, cqe->res,
-                               cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
-               }
+               if (cq_shift)
+                       seq_printf(m, ", extra1:%llu, extra2:%llu\n",
+                                       cqe->big_cqe[0], cqe->big_cqe[1]);
+               seq_printf(m, "\n");
        }
 
        /*
index 242d896..00db7ad 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -125,6 +125,11 @@ enum {
        IO_CHECK_CQ_DROPPED_BIT,
 };
 
+enum {
+       IO_EVENTFD_OP_SIGNAL_BIT,
+       IO_EVENTFD_OP_FREE_BIT,
+};
+
 struct io_defer_entry {
        struct list_head        list;
        struct io_kiocb         *req;
@@ -142,7 +147,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 static void io_dismantle_req(struct io_kiocb *req);
 static void io_clean_op(struct io_kiocb *req);
 static void io_queue_sqe(struct io_kiocb *req);
-
+static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
 
 static struct kmem_cache *req_cachep;
@@ -316,6 +321,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        INIT_LIST_HEAD(&ctx->rsrc_ref_list);
        INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
        init_llist_head(&ctx->rsrc_put_llist);
+       init_llist_head(&ctx->work_llist);
        INIT_LIST_HEAD(&ctx->tctx_list);
        ctx->submit_state.free_list.next = NULL;
        INIT_WQ_LIST(&ctx->locked_free_list);
@@ -477,25 +483,28 @@ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
        }
 }
 
-static void io_eventfd_signal(struct io_ring_ctx *ctx)
+
+static void io_eventfd_ops(struct rcu_head *rcu)
 {
-       struct io_ev_fd *ev_fd;
-       bool skip;
+       struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+       int ops = atomic_xchg(&ev_fd->ops, 0);
 
-       spin_lock(&ctx->completion_lock);
-       /*
-        * Eventfd should only get triggered when at least one event has been
-        * posted. Some applications rely on the eventfd notification count only
-        * changing IFF a new CQE has been added to the CQ ring. There's no
-        * depedency on 1:1 relationship between how many times this function is
-        * called (and hence the eventfd count) and number of CQEs posted to the
-        * CQ ring.
+       if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
+               eventfd_signal(ev_fd->cq_ev_fd, 1);
+
+       /* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
+        * ordering in a race but if references are 0 we know we have to free
+        * it regardless.
         */
-       skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
-       ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
-       spin_unlock(&ctx->completion_lock);
-       if (skip)
-               return;
+       if (atomic_dec_and_test(&ev_fd->refs)) {
+               eventfd_ctx_put(ev_fd->cq_ev_fd);
+               kfree(ev_fd);
+       }
+}
+
+static void io_eventfd_signal(struct io_ring_ctx *ctx)
+{
+       struct io_ev_fd *ev_fd = NULL;
 
        rcu_read_lock();
        /*
@@ -513,13 +522,46 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
                goto out;
        if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
                goto out;
+       if (ev_fd->eventfd_async && !io_wq_current_is_worker())
+               goto out;
 
-       if (!ev_fd->eventfd_async || io_wq_current_is_worker())
+       if (likely(eventfd_signal_allowed())) {
                eventfd_signal(ev_fd->cq_ev_fd, 1);
+       } else {
+               atomic_inc(&ev_fd->refs);
+               if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
+                       call_rcu(&ev_fd->rcu, io_eventfd_ops);
+               else
+                       atomic_dec(&ev_fd->refs);
+       }
+
 out:
        rcu_read_unlock();
 }
 
+static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
+{
+       bool skip;
+
+       spin_lock(&ctx->completion_lock);
+
+       /*
+        * Eventfd should only get triggered when at least one event has been
+        * posted. Some applications rely on the eventfd notification count
+        * only changing IFF a new CQE has been added to the CQ ring. There's
+        * no dependency on 1:1 relationship between how many times this
+        * function is called (and hence the eventfd count) and number of CQEs
+        * posted to the CQ ring.
+        */
+       skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
+       ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+       spin_unlock(&ctx->completion_lock);
+       if (skip)
+               return;
+
+       io_eventfd_signal(ctx);
+}
+
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
        if (ctx->off_timeout_used || ctx->drain_active) {
@@ -531,7 +573,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
                spin_unlock(&ctx->completion_lock);
        }
        if (ctx->has_evfd)
-               io_eventfd_signal(ctx);
+               io_eventfd_flush_signal(ctx);
 }
 
 static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
@@ -567,7 +609,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 
        io_cq_lock(ctx);
        while (!list_empty(&ctx->cq_overflow_list)) {
-               struct io_uring_cqe *cqe = io_get_cqe(ctx);
+               struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
                struct io_overflow_cqe *ocqe;
 
                if (!cqe && !force)
@@ -694,12 +736,19 @@ bool io_req_cqe_overflow(struct io_kiocb *req)
  * control dependency is enough as we're using WRITE_ONCE to
  * fill the cq entry
  */
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 {
        struct io_rings *rings = ctx->rings;
        unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
        unsigned int free, queued, len;
 
+       /*
+        * Posting into the CQ when there are pending overflowed CQEs may break
+        * ordering guarantees, which will affect links, F_MORE users and more.
+        * Force overflow the completion.
+        */
+       if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
+               return NULL;
 
        /* userspace may cheat modifying the tail, be safe and do min */
        queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -823,8 +872,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
 {
+       const struct io_op_def *def = &io_op_defs[req->opcode];
+
        req_set_fail(req);
        io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+       if (def->fail)
+               def->fail(req);
        io_req_complete_post(req);
 }
 
@@ -1047,17 +1100,40 @@ void tctx_task_work(struct callback_head *cb)
        trace_io_uring_task_work_run(tctx, count, loops);
 }
 
-void io_req_task_work_add(struct io_kiocb *req)
+static void io_req_local_work_add(struct io_kiocb *req)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+
+       if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
+               return;
+
+       if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+               io_move_task_work_from_local(ctx);
+               return;
+       }
+
+       if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+               atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+
+       if (ctx->has_evfd)
+               io_eventfd_signal(ctx);
+       io_cqring_wake(ctx);
+
+}
+
+static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 {
        struct io_uring_task *tctx = req->task->io_uring;
        struct io_ring_ctx *ctx = req->ctx;
        struct llist_node *node;
-       bool running;
 
-       running = !llist_add(&req->io_task_work.node, &tctx->task_list);
+       if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+               io_req_local_work_add(req);
+               return;
+       }
 
        /* task_work already pending, we're done */
-       if (running)
+       if (!llist_add(&req->io_task_work.node, &tctx->task_list))
                return;
 
        if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
@@ -1077,6 +1153,84 @@ void io_req_task_work_add(struct io_kiocb *req)
        }
 }
 
+void io_req_task_work_add(struct io_kiocb *req)
+{
+       __io_req_task_work_add(req, true);
+}
+
+static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
+{
+       struct llist_node *node;
+
+       node = llist_del_all(&ctx->work_llist);
+       while (node) {
+               struct io_kiocb *req = container_of(node, struct io_kiocb,
+                                                   io_task_work.node);
+
+               node = node->next;
+               __io_req_task_work_add(req, false);
+       }
+}
+
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
+{
+       struct llist_node *node;
+       struct llist_node fake;
+       struct llist_node *current_final = NULL;
+       int ret;
+       unsigned int loops = 1;
+
+       if (unlikely(ctx->submitter_task != current))
+               return -EEXIST;
+
+       node = io_llist_xchg(&ctx->work_llist, &fake);
+       ret = 0;
+again:
+       while (node != current_final) {
+               struct llist_node *next = node->next;
+               struct io_kiocb *req = container_of(node, struct io_kiocb,
+                                                   io_task_work.node);
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+               req->io_task_work.func(req, &locked);
+               ret++;
+               node = next;
+       }
+
+       if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+               atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+
+       node = io_llist_cmpxchg(&ctx->work_llist, &fake, NULL);
+       if (node != &fake) {
+               loops++;
+               current_final = &fake;
+               node = io_llist_xchg(&ctx->work_llist, &fake);
+               goto again;
+       }
+
+       if (locked)
+               io_submit_flush_completions(ctx);
+       trace_io_uring_local_work_run(ctx, ret, loops);
+       return ret;
+
+}
+
+int io_run_local_work(struct io_ring_ctx *ctx)
+{
+       bool locked;
+       int ret;
+
+       if (llist_empty(&ctx->work_llist))
+               return 0;
+
+       __set_current_state(TASK_RUNNING);
+       locked = mutex_trylock(&ctx->uring_lock);
+       ret = __io_run_local_work(ctx, locked);
+       if (locked)
+               mutex_unlock(&ctx->uring_lock);
+
+       return ret;
+}
+
 static void io_req_tw_post(struct io_kiocb *req, bool *locked)
 {
        io_req_complete_post(req);
@@ -1183,7 +1337,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
        struct io_wq_work_node *node, *prev;
        struct io_submit_state *state = &ctx->submit_state;
 
-       spin_lock(&ctx->completion_lock);
+       io_cq_lock(ctx);
        wq_list_for_each(node, prev, &state->compl_reqs) {
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                            comp_list);
@@ -1254,6 +1408,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
        int ret = 0;
        unsigned long check_cq;
 
+       if (!io_allowed_run_tw(ctx))
+               return -EEXIST;
+
        check_cq = READ_ONCE(ctx->check_cq);
        if (unlikely(check_cq)) {
                if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
@@ -1284,13 +1441,19 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                 * forever, while the workqueue is stuck trying to acquire the
                 * very same mutex.
                 */
-               if (wq_list_empty(&ctx->iopoll_list)) {
+               if (wq_list_empty(&ctx->iopoll_list) ||
+                   io_task_work_pending(ctx)) {
                        u32 tail = ctx->cached_cq_tail;
 
-                       mutex_unlock(&ctx->uring_lock);
-                       io_run_task_work();
-                       mutex_lock(&ctx->uring_lock);
+                       if (!llist_empty(&ctx->work_llist))
+                               __io_run_local_work(ctx, true);
 
+                       if (task_work_pending(current) ||
+                           wq_list_empty(&ctx->iopoll_list)) {
+                               mutex_unlock(&ctx->uring_lock);
+                               io_run_task_work();
+                               mutex_lock(&ctx->uring_lock);
+                       }
                        /* some requests don't go through iopoll_list */
                        if (tail != ctx->cached_cq_tail ||
                            wq_list_empty(&ctx->iopoll_list))
@@ -1732,10 +1895,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
                io_req_task_queue(req);
                break;
        case IO_APOLL_ABORTED:
-               /*
-                * Queued up for async execution, worker will release
-                * submit reference when the iocb is actually submitted.
-                */
                io_kbuf_recycle(req, 0);
                io_queue_iowq(req, NULL);
                break;
@@ -2149,6 +2308,13 @@ struct io_wait_queue {
        unsigned nr_timeouts;
 };
 
+static inline bool io_has_work(struct io_ring_ctx *ctx)
+{
+       return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
+              ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
+               !llist_empty(&ctx->work_llist));
+}
+
 static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
        struct io_ring_ctx *ctx = iowq->ctx;
@@ -2167,20 +2333,20 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 {
        struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
                                                        wq);
+       struct io_ring_ctx *ctx = iowq->ctx;
 
        /*
         * Cannot safely flush overflowed CQEs from here, ensure we wake up
         * the task, and the next invocation will do it.
         */
-       if (io_should_wake(iowq) ||
-           test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
+       if (io_should_wake(iowq) || io_has_work(ctx))
                return autoremove_wake_function(curr, mode, wake_flags, key);
        return -1;
 }
 
-int io_run_task_work_sig(void)
+int io_run_task_work_sig(struct io_ring_ctx *ctx)
 {
-       if (io_run_task_work())
+       if (io_run_task_work_ctx(ctx) > 0)
                return 1;
        if (task_sigpending(current))
                return -EINTR;
@@ -2196,7 +2362,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
        unsigned long check_cq;
 
        /* make sure we run task_work before checking for signals */
-       ret = io_run_task_work_sig();
+       ret = io_run_task_work_sig(ctx);
        if (ret || io_should_wake(iowq))
                return ret;
 
@@ -2226,13 +2392,19 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        ktime_t timeout = KTIME_MAX;
        int ret;
 
+       if (!io_allowed_run_tw(ctx))
+               return -EEXIST;
+
        do {
+               /* always run at least 1 task work to process local work */
+               ret = io_run_task_work_ctx(ctx);
+               if (ret < 0)
+                       return ret;
                io_cqring_overflow_flush(ctx);
+
                if (io_cqring_events(ctx) >= min_events)
                        return 0;
-               if (!io_run_task_work())
-                       break;
-       } while (1);
+       } while (ret > 0);
 
        if (sig) {
 #ifdef CONFIG_COMPAT
@@ -2366,17 +2538,11 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
        ev_fd->eventfd_async = eventfd_async;
        ctx->has_evfd = true;
        rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
+       atomic_set(&ev_fd->refs, 1);
+       atomic_set(&ev_fd->ops, 0);
        return 0;
 }
 
-static void io_eventfd_put(struct rcu_head *rcu)
-{
-       struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
-
-       eventfd_ctx_put(ev_fd->cq_ev_fd);
-       kfree(ev_fd);
-}
-
 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 {
        struct io_ev_fd *ev_fd;
@@ -2386,7 +2552,8 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
        if (ev_fd) {
                ctx->has_evfd = false;
                rcu_assign_pointer(ctx->io_ev_fd, NULL);
-               call_rcu(&ev_fd->rcu, io_eventfd_put);
+               if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
+                       call_rcu(&ev_fd->rcu, io_eventfd_ops);
                return 0;
        }
 
@@ -2509,8 +2676,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
         * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
         * pushs them to do the flush.
         */
-       if (io_cqring_events(ctx) ||
-           test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+
+       if (io_cqring_events(ctx) || io_has_work(ctx))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
@@ -2573,6 +2740,9 @@ static __cold void io_ring_exit_work(struct work_struct *work)
         * as nobody else will be looking for them.
         */
        do {
+               if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+                       io_move_task_work_from_local(ctx);
+
                while (io_uring_try_cancel_requests(ctx, NULL, true))
                        cond_resched();
 
@@ -2770,13 +2940,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                }
        }
 
+       if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+               ret |= io_run_local_work(ctx) > 0;
        ret |= io_cancel_defer_files(ctx, task, cancel_all);
        mutex_lock(&ctx->uring_lock);
        ret |= io_poll_remove_all(ctx, task, cancel_all);
        mutex_unlock(&ctx->uring_lock);
        ret |= io_kill_timeouts(ctx, task, cancel_all);
        if (task)
-               ret |= io_run_task_work();
+               ret |= io_run_task_work() > 0;
        return ret;
 }
 
@@ -2992,8 +3164,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
        struct fd f;
        long ret;
 
-       io_run_task_work();
-
        if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
                               IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
                               IORING_ENTER_REGISTERED_RING)))
@@ -3063,8 +3233,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        goto iopoll_locked;
                mutex_unlock(&ctx->uring_lock);
        }
+
        if (flags & IORING_ENTER_GETEVENTS) {
                int ret2;
+
                if (ctx->syscall_iopoll) {
                        /*
                         * We disallow the app entering submit/complete with
@@ -3293,18 +3465,30 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                /* IPI related flags don't make sense with SQPOLL */
                if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
-                                 IORING_SETUP_TASKRUN_FLAG))
+                                 IORING_SETUP_TASKRUN_FLAG |
+                                 IORING_SETUP_DEFER_TASKRUN))
                        goto err;
                ctx->notify_method = TWA_SIGNAL_NO_IPI;
        } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
                ctx->notify_method = TWA_SIGNAL_NO_IPI;
        } else {
-               if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+               if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
+                   !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
                        goto err;
                ctx->notify_method = TWA_SIGNAL;
        }
 
        /*
+        * For DEFER_TASKRUN we require the completion task to be the same as the
+        * submission task. This implies that there is only one submitter, so enforce
+        * that.
+        */
+       if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
+           !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
+               goto err;
+       }
+
+       /*
         * This is just grabbed for accounting purposes. When a process exits,
         * the mm is exited and dropped before the files, hence we need to hang
         * on to this mm purely for the purposes of being able to unaccount
@@ -3408,7 +3592,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
                        IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
                        IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
                        IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
-                       IORING_SETUP_SINGLE_ISSUER))
+                       IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
                return -EINVAL;
 
        return io_uring_create(entries, &p, params);
@@ -3874,7 +4058,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 
        ctx = f.file->private_data;
 
-       io_run_task_work();
+       io_run_task_work_ctx(ctx);
 
        mutex_lock(&ctx->uring_lock);
        ret = __io_uring_register(ctx, opcode, arg, nr_args);
index 2f73f83..48ce234 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -24,9 +24,11 @@ enum {
        IOU_STOP_MULTISHOT      = -ECANCELED,
 };
 
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
 bool io_req_cqe_overflow(struct io_kiocb *req);
-int io_run_task_work_sig(void);
+int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
+int io_run_local_work(struct io_ring_ctx *ctx);
 void io_req_complete_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
 void io_req_complete_post(struct io_kiocb *req);
@@ -91,7 +93,8 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
 
 void io_cq_unlock_post(struct io_ring_ctx *ctx);
 
-static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
+                                                      bool overflow)
 {
        if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
                struct io_uring_cqe *cqe = ctx->cqe_cached;
@@ -103,7 +106,12 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
                return cqe;
        }
 
-       return __io_get_cqe(ctx);
+       return __io_get_cqe(ctx, overflow);
+}
+
+static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+{
+       return io_get_cqe_overflow(ctx, false);
 }
 
 static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
@@ -221,17 +229,43 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
        return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
-static inline bool io_run_task_work(void)
+static inline int io_run_task_work(void)
 {
-       if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
+       if (task_work_pending(current)) {
+               if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+                       clear_notify_signal();
                __set_current_state(TASK_RUNNING);
-               clear_notify_signal();
-               if (task_work_pending(current))
-                       task_work_run();
-               return true;
+               task_work_run();
+               return 1;
        }
 
-       return false;
+       return 0;
+}
+
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+       return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+               !wq_list_empty(&ctx->work_llist);
+}
+
+static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
+{
+       int ret = 0;
+       int ret2;
+
+       if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+               ret = io_run_local_work(ctx);
+
+       /* want to run this after in case more is added */
+       ret2 = io_run_task_work();
+
+       /* Try propagate error in favour of if tasks were run,
+        * but still make sure to run them if requested
+        */
+       if (ret >= 0)
+               ret += ret2;
+
+       return ret;
 }
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
@@ -301,4 +335,10 @@ static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
        return container_of(node, struct io_kiocb, comp_list);
 }
 
+static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
+{
+       return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
+                     ctx->submitter_task == current);
+}
+
 #endif
index 746fbf3..c23e15d 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -86,18 +86,6 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 
 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
-       /*
-        * READV uses fields in `struct io_rw` (len/addr) to stash the selected
-        * buffer data. However if that buffer is recycled the original request
-        * data stored in addr is lost. Therefore forbid recycling for now.
-        */
-       if (req->opcode == IORING_OP_READV) {
-               if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
-                       req->buf_list->head++;
-                       req->buf_list = NULL;
-               }
-               return;
-       }
        if (req->flags & REQ_F_BUFFER_SELECTED)
                io_kbuf_recycle_legacy(req, issue_flags);
        if (req->flags & REQ_F_BUFFER_RING)
index 60e392f..caa6a80 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -55,21 +55,14 @@ struct io_sr_msg {
                struct user_msghdr __user       *umsg;
                void __user                     *buf;
        };
+       unsigned                        len;
+       unsigned                        done_io;
        unsigned                        msg_flags;
-       unsigned                        flags;
-       size_t                          len;
-       size_t                          done_io;
-};
-
-struct io_sendzc {
-       struct file                     *file;
-       void __user                     *buf;
-       size_t                          len;
-       unsigned                        msg_flags;
-       unsigned                        flags;
-       unsigned                        addr_len;
+       u16                             flags;
+       /* initialised and used only by !msg send variants */
+       u16                             addr_len;
        void __user                     *addr;
-       size_t                          done_io;
+       /* used only for send zerocopy */
        struct io_kiocb                 *notif;
 };
 
@@ -126,28 +119,36 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
        }
 }
 
-static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
-                                                     unsigned int issue_flags)
+static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
+                                                 unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;
+       struct io_async_msghdr *hdr;
 
        if (!(issue_flags & IO_URING_F_UNLOCKED) &&
            (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
-               struct io_async_msghdr *hdr;
-
                hdr = container_of(entry, struct io_async_msghdr, cache);
+               hdr->free_iov = NULL;
                req->flags |= REQ_F_ASYNC_DATA;
                req->async_data = hdr;
                return hdr;
        }
 
-       if (!io_alloc_async_data(req))
-               return req->async_data;
-
+       if (!io_alloc_async_data(req)) {
+               hdr = req->async_data;
+               hdr->free_iov = NULL;
+               return hdr;
+       }
        return NULL;
 }
 
+static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
+{
+       /* ->prep_async is always called from the submission context */
+       return io_msg_alloc_async(req, 0);
+}
+
 static int io_setup_async_msg(struct io_kiocb *req,
                              struct io_async_msghdr *kmsg,
                              unsigned int issue_flags)
@@ -156,17 +157,20 @@ static int io_setup_async_msg(struct io_kiocb *req,
 
        if (req_has_async_data(req))
                return -EAGAIN;
-       async_msg = io_recvmsg_alloc_async(req, issue_flags);
+       async_msg = io_msg_alloc_async(req, issue_flags);
        if (!async_msg) {
                kfree(kmsg->free_iov);
                return -ENOMEM;
        }
        req->flags |= REQ_F_NEED_CLEANUP;
        memcpy(async_msg, kmsg, sizeof(*kmsg));
-       async_msg->msg.msg_name = &async_msg->addr;
+       if (async_msg->msg.msg_name)
+               async_msg->msg.msg_name = &async_msg->addr;
        /* if were using fast_iov, set it to the new one */
-       if (!async_msg->free_iov)
-               async_msg->msg.msg_iter.iov = async_msg->fast_iov;
+       if (!kmsg->free_iov) {
+               size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
+               async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
+       }
 
        return -EAGAIN;
 }
@@ -182,34 +186,34 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
                                        &iomsg->free_iov);
 }
 
-int io_sendzc_prep_async(struct io_kiocb *req)
+int io_send_prep_async(struct io_kiocb *req)
 {
-       struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+       struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;
        int ret;
 
        if (!zc->addr || req_has_async_data(req))
                return 0;
-       if (io_alloc_async_data(req))
+       io = io_msg_alloc_async_prep(req);
+       if (!io)
                return -ENOMEM;
-
-       io = req->async_data;
        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
        return ret;
 }
 
 static int io_setup_async_addr(struct io_kiocb *req,
-                             struct sockaddr_storage *addr,
+                             struct sockaddr_storage *addr_storage,
                              unsigned int issue_flags)
 {
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;
 
-       if (!addr || req_has_async_data(req))
+       if (!sr->addr || req_has_async_data(req))
                return -EAGAIN;
-       if (io_alloc_async_data(req))
+       io = io_msg_alloc_async(req, issue_flags);
+       if (!io)
                return -ENOMEM;
-       io = req->async_data;
-       memcpy(&io->addr, addr, sizeof(io->addr));
+       memcpy(&io->addr, addr_storage, sizeof(io->addr));
        return -EAGAIN;
 }
 
@@ -217,6 +221,8 @@ int io_sendmsg_prep_async(struct io_kiocb *req)
 {
        int ret;
 
+       if (!io_msg_alloc_async_prep(req))
+               return -ENOMEM;
        ret = io_sendmsg_copy_hdr(req, req->async_data);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
@@ -234,8 +240,14 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
-       if (unlikely(sqe->file_index || sqe->addr2))
+       if (req->opcode == IORING_OP_SEND) {
+               if (READ_ONCE(sqe->__pad3[0]))
+                       return -EINVAL;
+               sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+               sr->addr_len = READ_ONCE(sqe->addr_len);
+       } else if (sqe->addr2 || sqe->file_index) {
                return -EINVAL;
+       }
 
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
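
This prep change is what lets plain IORING_OP_SEND carry a destination
address (sendto-like behaviour). A hedged userspace sketch, assuming
liburing's io_uring_prep_send_set_addr() helper and an unconnected UDP
socket:

#include <liburing.h>
#include <netinet/in.h>
#include <sys/socket.h>

int send_dgram(struct io_uring *ring, int udpfd, const void *buf, size_t len,
	       const struct sockaddr_in *dst)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;

	io_uring_prep_send(sqe, udpfd, buf, len, 0);
	/* Fills sqe->addr2/addr_len, the fields io_sendmsg_prep() reads above. */
	io_uring_prep_send_set_addr(sqe, (const struct sockaddr *)dst,
				    sizeof(*dst));

	return io_uring_submit(ring);
}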
@@ -291,13 +303,13 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_msg(req, kmsg, issue_flags);
-               if (ret == -ERESTARTSYS)
-                       ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
                req_set_fail(req);
        }
        /* fast path, check for non-NULL to avoid function call */
@@ -315,6 +327,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_send(struct io_kiocb *req, unsigned int issue_flags)
 {
+       struct sockaddr_storage __address;
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct msghdr msg;
        struct iovec iov;
@@ -323,9 +336,29 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
        int min_ret = 0;
        int ret;
 
+       msg.msg_name = NULL;
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_namelen = 0;
+       msg.msg_ubuf = NULL;
+
+       if (sr->addr) {
+               if (req_has_async_data(req)) {
+                       struct io_async_msghdr *io = req->async_data;
+
+                       msg.msg_name = &io->addr;
+               } else {
+                       ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
+                       if (unlikely(ret < 0))
+                               return ret;
+                       msg.msg_name = (struct sockaddr *)&__address;
+               }
+               msg.msg_namelen = sr->addr_len;
+       }
+
        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
-               return -EAGAIN;
+               return io_setup_async_addr(req, &__address, issue_flags);
 
        sock = sock_from_file(req->file);
        if (unlikely(!sock))
@@ -335,12 +368,6 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
        if (unlikely(ret))
                return ret;
 
-       msg.msg_name = NULL;
-       msg.msg_control = NULL;
-       msg.msg_controllen = 0;
-       msg.msg_namelen = 0;
-       msg.msg_ubuf = NULL;
-
        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
@@ -351,16 +378,17 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
        ret = sock_sendmsg(sock, &msg);
        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                       return -EAGAIN;
-               if (ret == -ERESTARTSYS)
-                       ret = -EINTR;
+                       return io_setup_async_addr(req, &__address, issue_flags);
+
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->len -= ret;
                        sr->buf += ret;
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
-                       return -EAGAIN;
+                       return io_setup_async_addr(req, &__address, issue_flags);
                }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
                req_set_fail(req);
        }
        if (ret >= 0)
@@ -454,7 +482,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 
                if (msg.msg_iovlen == 0) {
                        sr->len = 0;
-                       iomsg->free_iov = NULL;
                } else if (msg.msg_iovlen > 1) {
                        return -EINVAL;
                } else {
@@ -465,7 +492,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
                        if (clen < 0)
                                return -EINVAL;
                        sr->len = clen;
-                       iomsg->free_iov = NULL;
                }
 
                if (req->flags & REQ_F_APOLL_MULTISHOT) {
@@ -504,6 +530,8 @@ int io_recvmsg_prep_async(struct io_kiocb *req)
 {
        int ret;
 
+       if (!io_msg_alloc_async_prep(req))
+               return -ENOMEM;
        ret = io_recvmsg_copy_hdr(req, req->async_data);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
@@ -751,13 +779,13 @@ retry_multishot:
                        }
                        return ret;
                }
-               if (ret == -ERESTARTSYS)
-                       ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                req_set_fail(req);
@@ -847,8 +875,6 @@ retry_multishot:
 
                        return -EAGAIN;
                }
-               if (ret == -ERESTARTSYS)
-                       ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->len -= ret;
                        sr->buf += ret;
@@ -856,6 +882,8 @@ retry_multishot:
                        req->flags |= REQ_F_PARTIAL_IO;
                        return -EAGAIN;
                }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 out_free:
@@ -879,23 +907,30 @@ out_free:
        return ret;
 }
 
-void io_sendzc_cleanup(struct io_kiocb *req)
+void io_send_zc_cleanup(struct io_kiocb *req)
 {
-       struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+       struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
+       struct io_async_msghdr *io;
 
-       zc->notif->flags |= REQ_F_CQE_SKIP;
-       io_notif_flush(zc->notif);
-       zc->notif = NULL;
+       if (req_has_async_data(req)) {
+               io = req->async_data;
+               /* might be ->fast_iov if *msg_copy_hdr failed */
+               if (io->free_iov != io->fast_iov)
+                       kfree(io->free_iov);
+       }
+       if (zc->notif) {
+               io_notif_flush(zc->notif);
+               zc->notif = NULL;
+       }
 }
 
-int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+       struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *notif;
 
-       if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
-           READ_ONCE(sqe->__pad3[0]))
+       if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
                return -EINVAL;
        /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
        if (req->flags & REQ_F_CQE_SKIP)
@@ -922,14 +957,24 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                io_req_set_rsrc_node(notif, ctx, 0);
        }
 
+       if (req->opcode == IORING_OP_SEND_ZC) {
+               if (READ_ONCE(sqe->__pad3[0]))
+                       return -EINVAL;
+               zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+               zc->addr_len = READ_ONCE(sqe->addr_len);
+       } else {
+               if (unlikely(sqe->addr2 || sqe->file_index))
+                       return -EINVAL;
+               if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
+                       return -EINVAL;
+       }
+
        zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
        zc->len = READ_ONCE(sqe->len);
        zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (zc->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
 
-       zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
-       zc->addr_len = READ_ONCE(sqe->addr_len);
        zc->done_io = 0;
 
 #ifdef CONFIG_COMPAT
@@ -939,6 +984,13 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
+static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
+                                struct iov_iter *from, size_t length)
+{
+       skb_zcopy_downgrade_managed(skb);
+       return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+}
+
 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
                           struct iov_iter *from, size_t length)
 {
@@ -949,13 +1001,10 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
        ssize_t copied = 0;
        unsigned long truesize = 0;
 
-       if (!shinfo->nr_frags)
+       if (!frag)
                shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
-
-       if (!skb_zcopy_managed(skb) || !iov_iter_is_bvec(from)) {
-               skb_zcopy_downgrade_managed(skb);
+       else if (unlikely(!skb_zcopy_managed(skb)))
                return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
-       }
 
        bi.bi_size = min(from->count, length);
        bi.bi_bvec_done = from->iov_offset;
@@ -993,14 +1042,14 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
        return ret;
 }
 
-int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
+int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct sockaddr_storage __address, *addr = NULL;
-       struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+       struct sockaddr_storage __address;
+       struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct msghdr msg;
        struct iovec iov;
        struct socket *sock;
-       unsigned msg_flags, cflags;
+       unsigned msg_flags;
        int ret, min_ret = 0;
 
        sock = sock_from_file(req->file);
@@ -1016,26 +1065,26 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
                if (req_has_async_data(req)) {
                        struct io_async_msghdr *io = req->async_data;
 
-                       msg.msg_name = addr = &io->addr;
+                       msg.msg_name = &io->addr;
                } else {
                        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
                        if (unlikely(ret < 0))
                                return ret;
                        msg.msg_name = (struct sockaddr *)&__address;
-                       addr = &__address;
                }
                msg.msg_namelen = zc->addr_len;
        }
 
        if (!(req->flags & REQ_F_POLLED) &&
            (zc->flags & IORING_RECVSEND_POLL_FIRST))
-               return io_setup_async_addr(req, addr, issue_flags);
+               return io_setup_async_addr(req, &__address, issue_flags);
 
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
                                        (u64)(uintptr_t)zc->buf, zc->len);
                if (unlikely(ret))
                        return ret;
+               msg.sg_from_iter = io_sg_from_iter;
        } else {
                ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
                                          &msg.msg_iter);
@@ -1044,6 +1093,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
                ret = io_notif_account_mem(zc->notif, zc->len);
                if (unlikely(ret))
                        return ret;
+               msg.sg_from_iter = io_sg_from_iter_iovec;
        }
 
        msg_flags = zc->msg_flags | MSG_ZEROCOPY;
@@ -1054,22 +1104,19 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 
        msg.msg_flags = msg_flags;
        msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
-       msg.sg_from_iter = io_sg_from_iter;
        ret = sock_sendmsg(sock, &msg);
 
        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                       return io_setup_async_addr(req, addr, issue_flags);
+                       return io_setup_async_addr(req, &__address, issue_flags);
 
                if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
                        zc->len -= ret;
                        zc->buf += ret;
                        zc->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
-                       return io_setup_async_addr(req, addr, issue_flags);
+                       return io_setup_async_addr(req, &__address, issue_flags);
                }
-               if (ret < 0 && !zc->done_io)
-                       zc->notif->flags |= REQ_F_CQE_SKIP;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
@@ -1080,13 +1127,102 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
        else if (zc->done_io)
                ret = zc->done_io;
 
-       io_notif_flush(zc->notif);
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-       cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
-       io_req_set_res(req, ret, cflags);
+       /*
+        * If we're in io-wq we can't rely on tw ordering guarantees; defer
+        * flushing the notif to io_send_zc_cleanup()
+        */
+       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+               io_notif_flush(zc->notif);
+               req->flags &= ~REQ_F_NEED_CLEANUP;
+       }
+       io_req_set_res(req, ret, IORING_CQE_F_MORE);
        return IOU_OK;
 }
 
+int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+       struct io_async_msghdr iomsg, *kmsg;
+       struct socket *sock;
+       unsigned flags;
+       int ret, min_ret = 0;
+
+       sock = sock_from_file(req->file);
+       if (unlikely(!sock))
+               return -ENOTSOCK;
+
+       if (req_has_async_data(req)) {
+               kmsg = req->async_data;
+       } else {
+               ret = io_sendmsg_copy_hdr(req, &iomsg);
+               if (ret)
+                       return ret;
+               kmsg = &iomsg;
+       }
+
+       if (!(req->flags & REQ_F_POLLED) &&
+           (sr->flags & IORING_RECVSEND_POLL_FIRST))
+               return io_setup_async_msg(req, kmsg, issue_flags);
+
+       flags = sr->msg_flags | MSG_ZEROCOPY;
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               flags |= MSG_DONTWAIT;
+       if (flags & MSG_WAITALL)
+               min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
+       kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
+       kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+       ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+
+       if (unlikely(ret < min_ret)) {
+               if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+                       return io_setup_async_msg(req, kmsg, issue_flags);
+
+               if (ret > 0 && io_net_retry(sock, flags)) {
+                       sr->done_io += ret;
+                       req->flags |= REQ_F_PARTIAL_IO;
+                       return io_setup_async_msg(req, kmsg, issue_flags);
+               }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
+               req_set_fail(req);
+       }
+       /* fast path, check for non-NULL to avoid function call */
+       if (kmsg->free_iov) {
+               kfree(kmsg->free_iov);
+               kmsg->free_iov = NULL;
+       }
+
+       io_netmsg_recycle(req, issue_flags);
+       if (ret >= 0)
+               ret += sr->done_io;
+       else if (sr->done_io)
+               ret = sr->done_io;
+
+       /*
+        * If we're in io-wq we can't rely on tw ordering guarantees; defer
+        * flushing the notif to io_send_zc_cleanup()
+        */
+       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+               io_notif_flush(sr->notif);
+               req->flags &= ~REQ_F_NEED_CLEANUP;
+       }
+       io_req_set_res(req, ret, IORING_CQE_F_MORE);
+       return IOU_OK;
+}
+
+void io_sendrecv_fail(struct io_kiocb *req)
+{
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+
+       if (req->flags & REQ_F_PARTIAL_IO)
+               req->cqe.res = sr->done_io;
+
+       if ((req->flags & REQ_F_NEED_CLEANUP) &&
+           (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
+               req->cqe.flags |= IORING_CQE_F_MORE;
+}
+
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
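
Editor's note: for readers who want to exercise the zero-copy send path reworked above from userspace, here is a rough sketch (not taken from this merge) of driving IORING_OP_SEND_ZC and accounting for the paired notification CQE. It assumes liburing for ring setup and UAPI headers from a 6.1-level kernel; send_zc_once(), sockfd and buf are illustrative names, and error handling is minimal.

#include <errno.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

/* Submit one IORING_OP_SEND_ZC request and reap both CQEs it generates:
 * the send completion (flagged IORING_CQE_F_MORE) and the later
 * notification (flagged IORING_CQE_F_NOTIF) that tells us the kernel is
 * done with the buffer.
 */
static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int seen, ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_SEND_ZC;
	sqe->fd = sockfd;
	sqe->addr = (unsigned long)buf;
	sqe->len = len;
	sqe->user_data = 1;

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;

	for (seen = 0; seen < 2; seen++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			/* buffer may be reused or freed from this point on */
		} else if (cqe->res < 0) {
			fprintf(stderr, "send_zc: %d\n", cqe->res);
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}

Note that with this series the request CQE carries IORING_CQE_F_MORE even when the send fails outright, and the notification is no longer skipped for failed requests, so waiting for both CQEs is correct in the failure case as well.
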
io_uring/net.h
index d744a0a..5ffa11b 100644 (file)
@@ -31,18 +31,21 @@ struct io_async_connect {
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
 
-int io_sendzc_prep_async(struct io_kiocb *req);
 int io_sendmsg_prep_async(struct io_kiocb *req);
 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
+
 int io_send(struct io_kiocb *req, unsigned int issue_flags);
+int io_send_prep_async(struct io_kiocb *req);
 
 int io_recvmsg_prep_async(struct io_kiocb *req);
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
 int io_recv(struct io_kiocb *req, unsigned int issue_flags);
 
+void io_sendrecv_fail(struct io_kiocb *req);
+
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_accept(struct io_kiocb *req, unsigned int issue_flags);
 
@@ -53,9 +56,10 @@ int io_connect_prep_async(struct io_kiocb *req);
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
 
-int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
-int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-void io_sendzc_cleanup(struct io_kiocb *req);
+int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
+int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
+int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+void io_send_zc_cleanup(struct io_kiocb *req);
 
 void io_netmsg_cache_free(struct io_cache_entry *entry);
 #else
io_uring/opdef.c
index c4dddd0..2330f6d 100644 (file)
@@ -69,6 +69,7 @@ const struct io_op_def io_op_defs[] = {
                .issue                  = io_read,
                .prep_async             = io_readv_prep_async,
                .cleanup                = io_readv_writev_cleanup,
+               .fail                   = io_rw_fail,
        },
        [IORING_OP_WRITEV] = {
                .needs_file             = 1,
@@ -85,6 +86,7 @@ const struct io_op_def io_op_defs[] = {
                .issue                  = io_write,
                .prep_async             = io_writev_prep_async,
                .cleanup                = io_readv_writev_cleanup,
+               .fail                   = io_rw_fail,
        },
        [IORING_OP_FSYNC] = {
                .needs_file             = 1,
@@ -105,6 +107,7 @@ const struct io_op_def io_op_defs[] = {
                .name                   = "READ_FIXED",
                .prep                   = io_prep_rw,
                .issue                  = io_read,
+               .fail                   = io_rw_fail,
        },
        [IORING_OP_WRITE_FIXED] = {
                .needs_file             = 1,
@@ -119,6 +122,7 @@ const struct io_op_def io_op_defs[] = {
                .name                   = "WRITE_FIXED",
                .prep                   = io_prep_rw,
                .issue                  = io_write,
+               .fail                   = io_rw_fail,
        },
        [IORING_OP_POLL_ADD] = {
                .needs_file             = 1,
@@ -146,6 +150,7 @@ const struct io_op_def io_op_defs[] = {
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
                .ioprio                 = 1,
+               .manual_alloc           = 1,
                .name                   = "SENDMSG",
 #if defined(CONFIG_NET)
                .async_size             = sizeof(struct io_async_msghdr),
@@ -153,6 +158,7 @@ const struct io_op_def io_op_defs[] = {
                .issue                  = io_sendmsg,
                .prep_async             = io_sendmsg_prep_async,
                .cleanup                = io_sendmsg_recvmsg_cleanup,
+               .fail                   = io_sendrecv_fail,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
@@ -163,6 +169,7 @@ const struct io_op_def io_op_defs[] = {
                .pollin                 = 1,
                .buffer_select          = 1,
                .ioprio                 = 1,
+               .manual_alloc           = 1,
                .name                   = "RECVMSG",
 #if defined(CONFIG_NET)
                .async_size             = sizeof(struct io_async_msghdr),
@@ -170,6 +177,7 @@ const struct io_op_def io_op_defs[] = {
                .issue                  = io_recvmsg,
                .prep_async             = io_recvmsg_prep_async,
                .cleanup                = io_sendmsg_recvmsg_cleanup,
+               .fail                   = io_sendrecv_fail,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
@@ -273,6 +281,7 @@ const struct io_op_def io_op_defs[] = {
                .name                   = "READ",
                .prep                   = io_prep_rw,
                .issue                  = io_read,
+               .fail                   = io_rw_fail,
        },
        [IORING_OP_WRITE] = {
                .needs_file             = 1,
@@ -287,6 +296,7 @@ const struct io_op_def io_op_defs[] = {
                .name                   = "WRITE",
                .prep                   = io_prep_rw,
                .issue                  = io_write,
+               .fail                   = io_rw_fail,
        },
        [IORING_OP_FADVISE] = {
                .needs_file             = 1,
@@ -306,10 +316,14 @@ const struct io_op_def io_op_defs[] = {
                .pollout                = 1,
                .audit_skip             = 1,
                .ioprio                 = 1,
+               .manual_alloc           = 1,
                .name                   = "SEND",
 #if defined(CONFIG_NET)
+               .async_size             = sizeof(struct io_async_msghdr),
                .prep                   = io_sendmsg_prep,
                .issue                  = io_send,
+               .fail                   = io_sendrecv_fail,
+               .prep_async             = io_send_prep_async,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
@@ -325,6 +339,7 @@ const struct io_op_def io_op_defs[] = {
 #if defined(CONFIG_NET)
                .prep                   = io_recvmsg_prep,
                .issue                  = io_recv,
+               .fail                   = io_sendrecv_fail,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
@@ -465,6 +480,7 @@ const struct io_op_def io_op_defs[] = {
                .needs_file             = 1,
                .plug                   = 1,
                .name                   = "URING_CMD",
+               .iopoll                 = 1,
                .async_size             = uring_cmd_pdu_size(1),
                .prep                   = io_uring_cmd_prep,
                .issue                  = io_uring_cmd,
@@ -480,10 +496,30 @@ const struct io_op_def io_op_defs[] = {
                .manual_alloc           = 1,
 #if defined(CONFIG_NET)
                .async_size             = sizeof(struct io_async_msghdr),
-               .prep                   = io_sendzc_prep,
-               .issue                  = io_sendzc,
-               .prep_async             = io_sendzc_prep_async,
-               .cleanup                = io_sendzc_cleanup,
+               .prep                   = io_send_zc_prep,
+               .issue                  = io_send_zc,
+               .prep_async             = io_send_prep_async,
+               .cleanup                = io_send_zc_cleanup,
+               .fail                   = io_sendrecv_fail,
+#else
+               .prep                   = io_eopnotsupp_prep,
+#endif
+       },
+       [IORING_OP_SENDMSG_ZC] = {
+               .name                   = "SENDMSG_ZC",
+               .needs_file             = 1,
+               .unbound_nonreg_file    = 1,
+               .pollout                = 1,
+               .audit_skip             = 1,
+               .ioprio                 = 1,
+               .manual_alloc           = 1,
+#if defined(CONFIG_NET)
+               .async_size             = sizeof(struct io_async_msghdr),
+               .prep                   = io_send_zc_prep,
+               .issue                  = io_sendmsg_zc,
+               .prep_async             = io_sendmsg_prep_async,
+               .cleanup                = io_send_zc_cleanup,
+               .fail                   = io_sendrecv_fail,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
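
Editor's note: the table entry above registers the new IORING_OP_SENDMSG_ZC opcode next to IORING_OP_SEND_ZC. As a minimal sketch (not from the patch; recent liburing releases also ship prep helpers for the zero-copy send opcodes), filling the SQE by hand could look roughly like this, with prep_sendmsg_zc() and its arguments being illustrative names:

#include <liburing.h>
#include <string.h>
#include <sys/socket.h>

/* Prepare an SQE for zero-copy sendmsg. The msghdr, its iovec and the
 * payload must stay valid until the IORING_CQE_F_NOTIF completion arrives,
 * since the kernel may still be referencing the pages.
 */
static void prep_sendmsg_zc(struct io_uring_sqe *sqe, int sockfd,
			    const struct msghdr *msg)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_SENDMSG_ZC;
	sqe->fd = sockfd;
	sqe->addr = (unsigned long)msg;
	sqe->len = 1;	/* per liburing convention; length comes from msg->msg_iov */
	sqe->msg_flags = 0;
	sqe->user_data = 2;
}

Completion-side handling follows the same two-CQE pattern sketched after the io_uring/net.c hunks above.
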
io_uring/opdef.h
index 763c6e5..3efe06d 100644 (file)
@@ -36,6 +36,7 @@ struct io_op_def {
        int (*issue)(struct io_kiocb *, unsigned int);
        int (*prep_async)(struct io_kiocb *);
        void (*cleanup)(struct io_kiocb *);
+       void (*fail)(struct io_kiocb *);
 };
 
 extern const struct io_op_def io_op_defs[];
io_uring/rsrc.c
index cf32721..6f88ded 100644 (file)
@@ -341,7 +341,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
                flush_delayed_work(&ctx->rsrc_put_work);
                reinit_completion(&data->done);
 
-               ret = io_run_task_work_sig();
+               ret = io_run_task_work_sig(ctx);
                mutex_lock(&ctx->uring_lock);
        } while (ret >= 0);
        data->quiesce = false;
io_uring/rw.c
index 76ebcfe..a25cd44 100644 (file)
@@ -33,6 +33,46 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
        return req->flags & REQ_F_SUPPORT_NOWAIT;
 }
 
+#ifdef CONFIG_COMPAT
+static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
+{
+       struct compat_iovec __user *uiov;
+       compat_ssize_t clen;
+
+       uiov = u64_to_user_ptr(rw->addr);
+       if (!access_ok(uiov, sizeof(*uiov)))
+               return -EFAULT;
+       if (__get_user(clen, &uiov->iov_len))
+               return -EFAULT;
+       if (clen < 0)
+               return -EINVAL;
+
+       rw->len = clen;
+       return 0;
+}
+#endif
+
+static int io_iov_buffer_select_prep(struct io_kiocb *req)
+{
+       struct iovec __user *uiov;
+       struct iovec iov;
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+       if (rw->len != 1)
+               return -EINVAL;
+
+#ifdef CONFIG_COMPAT
+       if (req->ctx->compat)
+               return io_iov_compat_buffer_select_prep(rw);
+#endif
+
+       uiov = u64_to_user_ptr(rw->addr);
+       if (copy_from_user(&iov, uiov, sizeof(*uiov)))
+               return -EFAULT;
+       rw->len = iov.iov_len;
+       return 0;
+}
+
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
@@ -69,6 +109,16 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
+
+       /* Have to do this validation here, since by the time we're in
+        * io_read() rw->len might have changed due to buffer selection
+        */
+       if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
+               ret = io_iov_buffer_select_prep(req);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -186,14 +236,6 @@ static void kiocb_end_write(struct io_kiocb *req)
 
 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-       if (rw->kiocb.ki_flags & IOCB_WRITE) {
-               kiocb_end_write(req);
-               fsnotify_modify(req->file);
-       } else {
-               fsnotify_access(req->file);
-       }
        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
@@ -220,6 +262,20 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
        return res;
 }
 
+static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
+{
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+       if (rw->kiocb.ki_flags & IOCB_WRITE) {
+               kiocb_end_write(req);
+               fsnotify_modify(req->file);
+       } else {
+               fsnotify_access(req->file);
+       }
+
+       io_req_task_complete(req, locked);
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res)
 {
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
@@ -228,7 +284,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
        if (__io_complete_rw_common(req, res))
                return;
        io_req_set_res(req, io_fixup_rw_res(req, res), 0);
-       req->io_task_work.func = io_req_task_complete;
+       req->io_task_work.func = io_req_rw_complete;
        io_req_task_work_add(req);
 }
 
@@ -279,79 +335,6 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
        return IOU_ISSUE_SKIP_COMPLETE;
 }
 
-#ifdef CONFIG_COMPAT
-static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
-                               unsigned int issue_flags)
-{
-       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-       struct compat_iovec __user *uiov;
-       compat_ssize_t clen;
-       void __user *buf;
-       size_t len;
-
-       uiov = u64_to_user_ptr(rw->addr);
-       if (!access_ok(uiov, sizeof(*uiov)))
-               return -EFAULT;
-       if (__get_user(clen, &uiov->iov_len))
-               return -EFAULT;
-       if (clen < 0)
-               return -EINVAL;
-
-       len = clen;
-       buf = io_buffer_select(req, &len, issue_flags);
-       if (!buf)
-               return -ENOBUFS;
-       rw->addr = (unsigned long) buf;
-       iov[0].iov_base = buf;
-       rw->len = iov[0].iov_len = (compat_size_t) len;
-       return 0;
-}
-#endif
-
-static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
-                                     unsigned int issue_flags)
-{
-       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-       struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
-       void __user *buf;
-       ssize_t len;
-
-       if (copy_from_user(iov, uiov, sizeof(*uiov)))
-               return -EFAULT;
-
-       len = iov[0].iov_len;
-       if (len < 0)
-               return -EINVAL;
-       buf = io_buffer_select(req, &len, issue_flags);
-       if (!buf)
-               return -ENOBUFS;
-       rw->addr = (unsigned long) buf;
-       iov[0].iov_base = buf;
-       rw->len = iov[0].iov_len = len;
-       return 0;
-}
-
-static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
-                                   unsigned int issue_flags)
-{
-       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-       if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
-               iov[0].iov_base = u64_to_user_ptr(rw->addr);
-               iov[0].iov_len = rw->len;
-               return 0;
-       }
-       if (rw->len != 1)
-               return -EINVAL;
-
-#ifdef CONFIG_COMPAT
-       if (req->ctx->compat)
-               return io_compat_import(req, iov, issue_flags);
-#endif
-
-       return __io_iov_buffer_select(req, iov, issue_flags);
-}
-
 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
                                       struct io_rw_state *s,
                                       unsigned int issue_flags)
@@ -374,7 +357,8 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;
 
-       if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+       if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
+           (req->flags & REQ_F_BUFFER_SELECT)) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
@@ -390,14 +374,6 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
        }
 
        iovec = s->fast_iov;
-       if (req->flags & REQ_F_BUFFER_SELECT) {
-               ret = io_iov_buffer_select(req, iovec, issue_flags);
-               if (ret)
-                       return ERR_PTR(ret);
-               iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
-               return NULL;
-       }
-
        ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
                              req->ctx->compat);
        if (unlikely(ret < 0))
@@ -794,10 +770,12 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
        iov_iter_restore(&s->iter, &s->iter_state);
 
        ret2 = io_setup_async_rw(req, iovec, s, true);
-       if (ret2)
-               return ret2;
-
        iovec = NULL;
+       if (ret2) {
+               ret = ret > 0 ? ret : ret2;
+               goto done;
+       }
+
        io = req->async_data;
        s = &io->s;
        /*
@@ -823,6 +801,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
                        return -EAGAIN;
                }
 
+               req->cqe.res = iov_iter_count(&s->iter);
                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
@@ -984,6 +963,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
                io_cqring_wake(ctx);
 }
 
+void io_rw_fail(struct io_kiocb *req)
+{
+       int res;
+
+       res = io_fixup_rw_res(req, req->cqe.res);
+       io_req_set_res(req, res, req->cqe.flags);
+}
+
 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
        struct io_wq_work_node *pos, *start, *prev;
@@ -1000,7 +987,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 
        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
-               struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+               struct file *file = req->file;
                int ret;
 
                /*
@@ -1011,7 +998,17 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                if (READ_ONCE(req->iopoll_completed))
                        break;
 
-               ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
+               if (req->opcode == IORING_OP_URING_CMD) {
+                       struct io_uring_cmd *ioucmd;
+
+                       ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+                       ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
+                                                               poll_flags);
+               } else {
+                       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+                       ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
+               }
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
io_uring/rw.h
index 0204c3f..3b733f4 100644 (file)
@@ -21,3 +21,4 @@ int io_readv_prep_async(struct io_kiocb *req);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);
 int io_writev_prep_async(struct io_kiocb *req);
 void io_readv_writev_cleanup(struct io_kiocb *req);
+void io_rw_fail(struct io_kiocb *req);
io_uring/timeout.c
index 78ea2c6..e8a8c20 100644 (file)
@@ -149,11 +149,10 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
        nxt->link = NULL;
 }
 
-bool io_disarm_next(struct io_kiocb *req)
+void io_disarm_next(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
 {
        struct io_kiocb *link = NULL;
-       bool posted = false;
 
        if (req->flags & REQ_F_ARM_LTIMEOUT) {
                link = req->link;
@@ -161,7 +160,6 @@ bool io_disarm_next(struct io_kiocb *req)
                if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
                        io_remove_next_linked(req);
                        io_req_tw_post_queue(link, -ECANCELED, 0);
-                       posted = true;
                }
        } else if (req->flags & REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;
@@ -169,17 +167,12 @@ bool io_disarm_next(struct io_kiocb *req)
                spin_lock_irq(&ctx->timeout_lock);
                link = io_disarm_linked_timeout(req);
                spin_unlock_irq(&ctx->timeout_lock);
-               if (link) {
-                       posted = true;
+               if (link)
                        io_req_tw_post_queue(link, -ECANCELED, 0);
-               }
        }
        if (unlikely((req->flags & REQ_F_FAIL) &&
-                    !(req->flags & REQ_F_HARDLINK))) {
-               posted |= (req->link != NULL);
+                    !(req->flags & REQ_F_HARDLINK)))
                io_fail_links(req);
-       }
-       return posted;
 }
 
 struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
io_uring/timeout.h
index 858c626..a6939f1 100644 (file)
@@ -27,7 +27,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
 __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
                             bool cancel_all);
 void io_queue_linked_timeout(struct io_kiocb *req);
-bool io_disarm_next(struct io_kiocb *req);
+void io_disarm_next(struct io_kiocb *req);
 
 int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/uring_cmd.c
index e78b6f9..f3ed61e 100644 (file)
@@ -50,7 +50,11 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
        io_req_set_res(req, ret, 0);
        if (req->ctx->flags & IORING_SETUP_CQE32)
                io_req_set_cqe32_extra(req, res2, 0);
-       __io_req_complete(req, 0);
+       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+               /* order with io_iopoll_req_issued() checking ->iopoll_completed */
+               smp_store_release(&req->iopoll_completed, 1);
+       else
+               __io_req_complete(req, 0);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -97,8 +101,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
                issue_flags |= IO_URING_F_SQE128;
        if (ctx->flags & IORING_SETUP_CQE32)
                issue_flags |= IO_URING_F_CQE32;
-       if (ctx->flags & IORING_SETUP_IOPOLL)
+       if (ctx->flags & IORING_SETUP_IOPOLL) {
                issue_flags |= IO_URING_F_IOPOLL;
+               req->iopoll_completed = 0;
+               WRITE_ONCE(ioucmd->cookie, NULL);
+       }
 
        if (req_has_async_data(req))
                ioucmd->cmd = req->async_data;
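
Editor's note: the uring_cmd changes above are what let passthrough commands participate in IORING_SETUP_IOPOLL: io_uring_cmd_done() now defers to ->iopoll_completed, and io_do_iopoll() (see the io_uring/rw.c hunk) calls the file's new uring_cmd_iopoll hook. Below is a very rough driver-side sketch, loosely modelled on the NVMe passthrough support in this merge; the sampledrv_* name and the bio-in-cookie convention are assumptions for illustration, and real code would also want RCU protection around the cookie access.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/io_uring.h>

/*
 * Sketch of a file_operations::uring_cmd_iopoll implementation. Assumes the
 * driver stashed the in-flight bio in ioucmd->cookie when it issued the
 * command, and that the underlying queue supports polling.
 */
static int sampledrv_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				      struct io_comp_batch *iob,
				      unsigned int poll_flags)
{
	struct bio *bio = READ_ONCE(ioucmd->cookie);

	if (!bio || !bio->bi_bdev)
		return 0;
	return bio_poll(bio, iob, poll_flags);
}

On the completion side the driver still calls io_uring_cmd_done(); with IOPOLL enabled that now just marks the request as iopoll-completed, and the CQE is posted from the io_do_iopoll() loop.
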
tools/testing/selftests/net/io_uring_zerocopy_tx.c
index 8ce48ac..1542877 100644 (file)
@@ -400,7 +400,6 @@ static void do_tx(int domain, int type, int protocol)
                                                   cfg_payload_len, msg_flags);
                                sqe->user_data = NONZC_TAG;
                        } else {
-                               compl_cqes++;
                                io_uring_prep_sendzc(sqe, fd, payload,
                                                     cfg_payload_len,
                                                     msg_flags, zc_flags);
@@ -430,18 +429,23 @@ static void do_tx(int domain, int type, int protocol)
                        if (cqe->flags & IORING_CQE_F_NOTIF) {
                                if (cqe->flags & IORING_CQE_F_MORE)
                                        error(1, -EINVAL, "invalid notif flags");
+                               if (compl_cqes <= 0)
+                                       error(1, -EINVAL, "notification mismatch");
                                compl_cqes--;
                                i--;
-                       } else if (cqe->res <= 0) {
-                               if (cqe->flags & IORING_CQE_F_MORE)
-                                       error(1, cqe->res, "more with a failed send");
-                               error(1, cqe->res, "send failed");
-                       } else {
-                               if (cqe->user_data == ZC_TAG &&
-                                   !(cqe->flags & IORING_CQE_F_MORE))
-                                       error(1, cqe->res, "missing more flag");
+                               io_uring_cqe_seen(&ring);
+                               continue;
+                       }
+                       if (cqe->flags & IORING_CQE_F_MORE) {
+                               if (cqe->user_data != ZC_TAG)
+                                       error(1, cqe->res, "unexpected F_MORE");
+                               compl_cqes++;
+                       }
+                       if (cqe->res >= 0) {
                                packets++;
                                bytes += cqe->res;
+                       } else if (cqe->res != -EAGAIN) {
+                               error(1, cqe->res, "send failed");
                        }
                        io_uring_cqe_seen(&ring);
                }