// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
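/*
 * A request that carries an async msghdr can hand it back to the per-ring
 * cache on completion, so the next send/recv on this ring can reuse it
 * instead of allocating. Only safe when the ring is locked, i.e. not
 * IO_URING_F_UNLOCKED.
 */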
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;

			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
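/*
 * Called when an issue attempt has to be retried later: copy the on-stack
 * msghdr (including its iovec and name pointers) into req->async_data so
 * the state survives until the next attempt, then return -EAGAIN to
 * request that retry.
 */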
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}
int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
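/*
 * For multishot recvmsg the header (io_uring_recvmsg_out + name + control)
 * is carved out of the front of the selected buffer; reject name/control
 * sizes whose sum would overflow an int before doing that arithmetic.
 */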
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
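/*
 * Does a single receive for a multishot recvmsg. The payload lands
 * directly in the selected buffer (past the header area reserved by
 * io_recvmsg_prep_multishot), the source address is received into an
 * on-stack io_recvmsg_multishot_hdr, and the io_uring_recvmsg_out header
 * plus the (possibly truncated) name is copied to the front of the
 * buffer afterwards.
 */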
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
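/*
 * Issue path for recvmsg, shared by single-shot and multishot. For
 * multishot, a successful receive posts an aux CQE and loops back to
 * retry_multishot to pick a fresh provided buffer and receive again.
 */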
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF |
			  IORING_SEND_ZC_REPORT_USAGE))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}
	if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
		io_notif_to_data(notif)->zc_report = true;
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}
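/*
 * For plain (non-fixed) zerocopy buffers the pages aren't pinned by
 * io_uring, so downgrade any managed-frag state on the skb and use the
 * generic iovec zerocopy path, which takes its own page references.
 */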
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
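/*
 * Map bvecs from a registered (pinned) buffer straight into skb frags
 * without taking page references, marking the skb as having managed frag
 * refs. Falls back to the generic path if the skb already carries
 * unmanaged frags.
 */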
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
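/*
 * For IORING_ACCEPT_MULTISHOT, each accepted connection posts an aux CQE
 * with IORING_CQE_F_MORE set and jumps back to retry to accept again; the
 * request itself only completes on error or when posting the CQE fails.
 */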
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}
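/*
 * A nonblocking connect() that returns -EINPROGRESS is retried via poll;
 * on the retried issue, in_progress is set and the result is fetched
 * with sock_error() instead of calling connect() again.
 */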
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif