// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
	/* from struct io_accept */
	struct sockaddr __user		*addr;

	/* from struct io_connect */
	struct sockaddr __user		*addr;
	bool				seen_econnaborted;

	/* from struct io_sr_msg */
	struct compat_msghdr __user	*umsg_compat;
	struct user_msghdr __user	*umsg;
	/* initialised and used only by !msg send variants */
	void __user			*msg_control;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;
	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
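
/*
 * A short transfer is only worth retrying when MSG_WAITALL was requested and
 * the socket preserves a byte stream (SOCK_STREAM/SOCK_SEQPACKET), so a
 * follow-up attempt can continue where the previous one stopped.
 */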
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
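
/*
 * Grab an async msghdr, preferring the per-ring netmsg_cache when the
 * submission lock is held (!IO_URING_F_UNLOCKED); otherwise fall back to a
 * regular async data allocation.
 */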
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
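
/*
 * Going async: copy the on-stack msghdr into allocated async data so the
 * request can be retried later, repointing msg_name and the fast_iov at the
 * persistent copies, and return -EAGAIN to the issue path.
 */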
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}
int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
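
/*
 * Prep shared by IORING_OP_SEND and IORING_OP_SENDMSG: SEND may carry an
 * optional destination address in addr2/addr_len, SENDMSG takes a user
 * msghdr in sqe->addr. MSG_NOSIGNAL is always enforced.
 */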
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
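
/*
 * Issue IORING_OP_SENDMSG: reuse the async copy of the msghdr if the request
 * already went async once, otherwise copy it in now, then call
 * __sys_sendmsg_sock(). Short sends on retryable sockets are accounted in
 * sr->done_io and the request is set up to be retried.
 */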
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
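
/*
 * Issue IORING_OP_SEND: build the msghdr on the stack, optionally copying in
 * a destination address, and push the buffer through sock_sendmsg(). Partial
 * progress on retryable sockets is recorded before going async for another
 * attempt.
 */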
int io_send(struct io_kiocb *req, unsigned int issue_flags)
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	if (req_has_async_data(req)) {
		struct io_async_msghdr *io = req->async_data;

		msg.msg_name = &io->addr;
		ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
		if (unlikely(ret < 0))
		msg.msg_name = (struct sockaddr *)&__address;
	msg.msg_namelen = sr->addr_len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);

	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
		return io_setup_async_addr(req, &__address, issue_flags);
	if (ret == -ERESTARTSYS)

	else if (sr->done_io)
	io_req_set_res(req, ret, 0);
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}
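
/*
 * Copy in the user msghdr for recvmsg. With provided buffers only a single
 * iovec is accepted, and for multishot the name/control lengths are saved so
 * io_recvmsg_prep_multishot() can carve the header out of the buffer.
 */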
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))

		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
		} else if (msg.msg_iovlen > 1) {
			if (!access_ok(uiov, sizeof(*uiov)))
			if (__get_user(clen, &uiov->iov_len))

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))

		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
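
/*
 * Prep shared by IORING_OP_RECV and IORING_OP_RECVMSG, including validation
 * of the multishot mode: multishot requires provided buffers, forbids
 * MSG_WAITALL, and for RECV requires a zero length.
 */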
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}
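
/*
 * For multishot recvmsg the provided buffer receives a struct
 * io_uring_recvmsg_out, then the (possibly truncated) name and control data,
 * then the payload; io_recvmsg_multishot() fills in that layout.
 */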
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
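
/*
 * Issue IORING_OP_RECVMSG. Handles provided buffers, the multishot variant
 * via io_recvmsg_multishot(), -EAGAIN retries, and partial receives that are
 * accounted in sr->done_io before going async.
 */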
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		ret = io_recvmsg_copy_hdr(req, &iomsg);

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (io_do_buffer_select(req)) {
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
				io_kbuf_recycle(req, issue_flags);
		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,

	flags = sr->msg_flags;
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,

	if (ret == -EAGAIN && force_nonblock) {
		ret = io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
			io_kbuf_recycle(req, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;

	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
		return io_setup_async_msg(req, kmsg, issue_flags);
	if (ret == -ERESTARTSYS)
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {

	else if (sr->done_io)

	io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
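
/*
 * Issue IORING_OP_RECV: single-buffer receive through sock_recvmsg(), with
 * provided-buffer selection and the same retry/multishot handling as recvmsg.
 */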
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))

	sock = sock_from_file(req->file);

	if (io_do_buffer_select(req)) {
		buf = io_buffer_select(req, &len, issue_flags);

	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);

	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	flags = sr->msg_flags;
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);

	if (ret == -EAGAIN && force_nonblock) {
		if (issue_flags & IO_URING_F_MULTISHOT) {
			io_kbuf_recycle(req, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;

	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
	if (ret == -ERESTARTSYS)
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {

	else if (sr->done_io)

	io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}
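
/*
 * Prep for IORING_OP_SEND_ZC/IORING_OP_SENDMSG_ZC: allocate the notification
 * CQE that signals when the kernel is done with the user buffer, and
 * optionally resolve a registered (fixed) buffer.
 */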
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF |
			  IORING_SEND_ZC_REPORT_USAGE))
	notif = zc->notif = io_alloc_notif(ctx);
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
		io_notif_to_data(notif)->zc_report = true;

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
		if (unlikely(sqe->addr2 || sqe->file_index))
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
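
/*
 * Zero-copy: map the user bvec pages straight into skb frags instead of
 * copying, charging truesize to the socket as appropriate.
 */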
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	struct bvec_iter bi;
	unsigned long truesize = 0;

		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
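
/*
 * Issue IORING_OP_SEND_ZC: import either a fixed buffer or a plain user
 * buffer, attach the notification uarg, and send with MSG_ZEROCOPY. The
 * notif is flushed here unless running from io-wq, see the comment below.
 */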
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct socket *sock;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (req_has_async_data(req)) {
		struct io_async_msghdr *io = req->async_data;

		msg.msg_name = &io->addr;
		ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
		if (unlikely(ret < 0))
		msg.msg_name = (struct sockaddr *)&__address;
	msg.msg_namelen = zc->addr_len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		msg.sg_from_iter = io_sg_from_iter;
		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
		ret = io_notif_account_mem(zc->notif, zc->len);
		msg.sg_from_iter = io_sg_from_iter_iovec;

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		if (ret == -ERESTARTSYS)

	else if (zc->done_io)

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		ret = io_sendmsg_copy_hdr(req, &iomsg);

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;

	io_netmsg_recycle(req, issue_flags);

	else if (sr->done_io)

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}
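
/*
 * IORING_OP_ACCEPT: supports direct descriptors (fixed file slots) and a
 * multishot mode that keeps accepting and posting CQEs until cancelled or an
 * error occurs.
 */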
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;

		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
		if (ret == -ERESTARTSYS)
	} else if (!fixed) {
		fd_install(fd, file);
		ret = io_fixed_fd_install(req, issue_flags, file,

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);

	if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;

		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
		if (ret == -ERESTARTSYS)
	} else if (!fixed) {
		fd_install(fd, file);
		ret = io_fixed_fd_install(req, issue_flags, file,
	io_req_set_res(req, ret, 0);
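
/*
 * IORING_OP_CONNECT: the destination address is copied to kernel space at
 * prep-async time (or stashed on the first -EAGAIN), and the
 * -EINPROGRESS/-ECONNABORTED handling lets the request be re-issued until the
 * connect resolves.
 */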
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		socket = sock_from_file(req->file);
			ret = sock_error(socket->sk);

	if (req_has_async_data(req)) {
		io = req->async_data;
		ret = move_addr_to_kernel(connect->addr,

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
			connect->seen_econnaborted = true;
		if (req_has_async_data(req))
		if (io_alloc_async_data(req)) {
		memcpy(req->async_data, &__io, sizeof(__io));
	if (ret == -ERESTARTSYS)
	io_req_set_res(req, ret, 0);
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif