io_uring/net: combine fail handlers
author Pavel Begunkov <asml.silence@gmail.com>
Wed, 21 Sep 2022 11:17:53 +0000 (12:17 +0100)
committer Jens Axboe <axboe@kernel.dk>
Wed, 21 Sep 2022 19:15:02 +0000 (13:15 -0600)
Merge io_send_zc_fail() into io_sendrecv_fail(); this saves a few lines of
code and some headache for the following patch.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e0eba1d577413aef5602cd45f588b9230207082d.1663668091.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/net.c
io_uring/net.h
io_uring/opdef.c

diff --git a/io_uring/net.c b/io_uring/net.c
index 145beb4..209bc69 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -192,6 +192,7 @@ int io_send_prep_async(struct io_kiocb *req)
        io = io_msg_alloc_async_prep(req);
        if (!io)
                return -ENOMEM;
+       io->free_iov = NULL;
        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
        return ret;
 }
@@ -208,6 +209,7 @@ static int io_setup_async_addr(struct io_kiocb *req,
        io = io_msg_alloc_async(req, issue_flags);
        if (!io)
                return -ENOMEM;
+       io->free_iov = NULL;
        memcpy(&io->addr, addr_storage, sizeof(io->addr));
        return -EAGAIN;
 }
@@ -1119,26 +1121,25 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 void io_sendrecv_fail(struct io_kiocb *req)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+       struct io_async_msghdr *io;
        int res = req->cqe.res;
 
        if (req->flags & REQ_F_PARTIAL_IO)
                res = sr->done_io;
-       io_req_set_res(req, res, req->cqe.flags);
-}
-
-void io_send_zc_fail(struct io_kiocb *req)
-{
-       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-       int res = req->cqe.res;
-
-       if (req->flags & REQ_F_PARTIAL_IO) {
-               if (req->flags & REQ_F_NEED_CLEANUP) {
-                       io_notif_flush(sr->notif);
-                       sr->notif = NULL;
-                       req->flags &= ~REQ_F_NEED_CLEANUP;
-               }
-               res = sr->done_io;
+       if ((req->flags & REQ_F_NEED_CLEANUP) &&
+           req->opcode == IORING_OP_SEND_ZC) {
+               /* preserve notification for partial I/O */
+               if (res < 0)
+                       sr->notif->flags |= REQ_F_CQE_SKIP;
+               io_notif_flush(sr->notif);
+               sr->notif = NULL;
        }
+       if (req_has_async_data(req)) {
+               io = req->async_data;
+               kfree(io->free_iov);
+               io->free_iov = NULL;
+       }
+       req->flags &= ~REQ_F_NEED_CLEANUP;
        io_req_set_res(req, res, req->cqe.flags);
 }
 
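A note on the two "io->free_iov = NULL" additions earlier in this file: the combined
failure handler now kfree()s io->free_iov whenever async data is attached, even on
paths that never populated the iovec, so freshly (re)allocated async state has to
leave that pointer in a well-defined state. Below is a minimal standalone sketch of
that invariant; the names (async_hdr, alloc_async, sendrecv_fail) are invented for
illustration and this is not kernel code.

	#include <stdlib.h>
	#include <sys/uio.h>

	/* stand-in for the request's cached async state */
	struct async_hdr {
		struct iovec *free_iov;	/* heap iovec; a recycled entry may hold a stale pointer */
	};

	/* stand-in allocator: clear free_iov as soon as the entry is handed out */
	static struct async_hdr *alloc_async(void)
	{
		struct async_hdr *io = malloc(sizeof(*io));

		if (io)
			io->free_iov = NULL;	/* mirrors the "+ io->free_iov = NULL;" hunks */
		return io;
	}

	/* single failure path shared by the plain and zero-copy send variants */
	static void sendrecv_fail(struct async_hdr *io)
	{
		/* safe only because free_iov is always either valid or NULL */
		free(io->free_iov);
		io->free_iov = NULL;
	}

	int main(void)
	{
		struct async_hdr *io = alloc_async();

		if (io) {
			sendrecv_fail(io);	/* fine even though free_iov was never filled */
			free(io);
		}
		return 0;
	}

Without the NULL initialisation, a recycled cache entry could carry a stale pointer
straight into that unconditional free.
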
diff --git a/io_uring/net.h b/io_uring/net.h
index 337541f..45558e2 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -59,7 +59,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags);
 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 void io_send_zc_cleanup(struct io_kiocb *req);
-void io_send_zc_fail(struct io_kiocb *req);
 
 void io_netmsg_cache_free(struct io_cache_entry *entry);
 #else
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index c7d0a2f..0fdeb1b 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -500,7 +500,7 @@ const struct io_op_def io_op_defs[] = {
                .issue                  = io_send_zc,
                .prep_async             = io_send_prep_async,
                .cleanup                = io_send_zc_cleanup,
-               .fail                   = io_send_zc_fail,
+               .fail                   = io_sendrecv_fail,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif
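
Stepping back from the individual hunks, the opdef.c change is a straightforward
consolidation: two per-opcode failure callbacks collapse into one shared callback
that branches on the opcode only where the zero-copy path differs. A toy,
self-contained illustration of that table pattern follows; the names (enum op,
sendrecv_fail_cb, fail_cbs) are invented and do not reflect the real io_op_defs[]
layout.

	#include <stdio.h>

	enum op { OP_SEND, OP_RECV, OP_SEND_ZC };

	/* shared failure callback; branches only where the zero-copy path differs */
	static void sendrecv_fail_cb(enum op op)
	{
		if (op == OP_SEND_ZC)
			puts("flush (or skip) the zero-copy notification");
		puts("set the failed CQE result");
	}

	/* toy dispatch table in the spirit of io_op_defs[] */
	static void (*const fail_cbs[])(enum op) = {
		[OP_SEND]    = sendrecv_fail_cb,
		[OP_RECV]    = sendrecv_fail_cb,
		[OP_SEND_ZC] = sendrecv_fail_cb,	/* previously a dedicated handler */
	};

	int main(void)
	{
		fail_cbs[OP_SEND_ZC](OP_SEND_ZC);
		return 0;
	}

Sharing one callback also means the following patch only has a single failure path
to adjust.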