// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
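/*
 * Per-opcode command state, carved out of the io_kiocb command area via
 * io_kiocb_to_cmd(). The fields mirror what is read from the SQE at prep
 * time so the issue side never has to touch the SQE again.
 */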
struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};
static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
	       !(issue_flags & IO_URING_F_MULTISHOT) ||
	       !req->ctx->task_complete;
}
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
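/*
 * Try to stash the async msghdr in the per-ring netmsg cache so the next
 * send/recv on this ring can reuse it instead of allocating. Only done
 * when the ring lock is held (!IO_URING_F_UNLOCKED).
 */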
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
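/*
 * Allocate async msghdr state for a request: prefer an entry from the
 * ring's netmsg_cache when the ring is locked, otherwise fall back to a
 * plain io_alloc_async_data() allocation.
 */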
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
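/*
 * Copy the on-stack msghdr state into persistent async data so the
 * request can be retried later (this returns -EAGAIN to trigger that),
 * fixing up the name and fast_iov pointers to point into the new copy.
 */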
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}
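/*
 * Like io_setup_async_msg(), but for the address-only (non-msghdr) send
 * variants: preserve the destination address across a retry.
 */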
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}
int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
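/*
 * Non-vectored send (IORING_OP_SEND/sendto style): builds a msghdr on the
 * stack, optionally with a destination address copied in from userspace,
 * and only falls back to persistent async state when a retry is needed.
 */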
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
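/*
 * Multishot receive (IORING_RECV_MULTISHOT) requires provided buffers,
 * cannot be combined with MSG_WAITALL, and for IORING_OP_RECV must not
 * pass an explicit length. Each completed receive posts a CQE with
 * IORING_CQE_F_MORE set until the request is stopped.
 */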
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * assign the correct buffer group.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			return false;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}
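/*
 * For multishot recvmsg the selected provided buffer is laid out as:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control (controllen) | payload
 *
 * io_recvmsg_prep_multishot() reserves the header space and redirects
 * msg_control into the user buffer; io_recvmsg_multishot() copies the
 * header (and address) back once the receive completes.
 */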
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}
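/*
 * Zero-copy send cleanup: free any iovec copied for the msghdr variant
 * and flush the notification if it was not already flushed inline from
 * io_send_zc()/io_sendmsg_zc().
 */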
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}
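/*
 * ->sg_from_iter callbacks used for zero-copy sends. io_sg_from_iter()
 * maps bvec pages from a registered buffer straight into skb frags
 * (managed frags, no page reference taken per frag), while
 * io_sg_from_iter_iovec() downgrades the skb and falls back to the
 * generic __zerocopy_sg_from_iter().
 */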
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
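/*
 * Failure path shared by the send/recv opcodes: report any partial
 * progress as the result, and make sure the zerocopy variants still
 * signal the pending notification CQE via IORING_CQE_F_MORE.
 */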
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}
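/*
 * IORING_OP_SOCKET: create a socket like socket(2) and install it either
 * as a normal fd or directly into the fixed file table when a file_slot
 * was given.
 */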
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif