1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
11 #include <uapi/linux/io_uring.h>
15 #include "alloc_cache.h"
20 #if defined(CONFIG_NET)
28 struct sockaddr __user *addr;
47 struct sockaddr __user *addr;
50 bool seen_econnaborted;
56 struct compat_msghdr __user *umsg_compat;
57 struct user_msghdr __user *umsg;
64 /* initialised and used only by !msg send variants */
68 void __user *msg_control;
69 /* used only for send zerocopy */
70 struct io_kiocb *notif;
73 static inline bool io_check_multishot(struct io_kiocb *req,
74 unsigned int issue_flags)
77 * When ->locked_cq is set we only allow to post CQEs from the original
78 * task context. Usual request completions will be handled in other
79 * generic paths but multipoll may decide to post extra cqes.
81 return !(issue_flags & IO_URING_F_IOWQ) ||
82 !(issue_flags & IO_URING_F_MULTISHOT) ||
83 !req->ctx->task_complete;
86 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
88 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
90 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
91 sqe->buf_index || sqe->splice_fd_in))
94 shutdown->how = READ_ONCE(sqe->len);
95 req->flags |= REQ_F_FORCE_ASYNC;
99 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
101 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
105 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
107 sock = sock_from_file(req->file);
111 ret = __sys_shutdown_sock(sock, shutdown->how);
112 io_req_set_res(req, ret, 0);
116 static bool io_net_retry(struct socket *sock, int flags)
118 if (!(flags & MSG_WAITALL))
120 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
123 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
125 struct io_async_msghdr *hdr = req->async_data;
127 if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
130 /* Let normal cleanup path reap it if we fail adding to the cache */
131 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
132 req->async_data = NULL;
133 req->flags &= ~REQ_F_ASYNC_DATA;
137 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
138 unsigned int issue_flags)
140 struct io_ring_ctx *ctx = req->ctx;
141 struct io_cache_entry *entry;
142 struct io_async_msghdr *hdr;
144 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
145 entry = io_alloc_cache_get(&ctx->netmsg_cache);
147 hdr = container_of(entry, struct io_async_msghdr, cache);
148 hdr->free_iov = NULL;
149 req->flags |= REQ_F_ASYNC_DATA;
150 req->async_data = hdr;
155 if (!io_alloc_async_data(req)) {
156 hdr = req->async_data;
157 hdr->free_iov = NULL;
163 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
165 /* ->prep_async is always called from the submission context */
166 return io_msg_alloc_async(req, 0);
169 static int io_setup_async_msg(struct io_kiocb *req,
170 struct io_async_msghdr *kmsg,
171 unsigned int issue_flags)
173 struct io_async_msghdr *async_msg;
175 if (req_has_async_data(req))
177 async_msg = io_msg_alloc_async(req, issue_flags);
179 kfree(kmsg->free_iov);
182 req->flags |= REQ_F_NEED_CLEANUP;
183 memcpy(async_msg, kmsg, sizeof(*kmsg));
184 if (async_msg->msg.msg_name)
185 async_msg->msg.msg_name = &async_msg->addr;
186 /* if were using fast_iov, set it to the new one */
187 if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
188 size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
189 async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
195 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
196 struct io_async_msghdr *iomsg)
198 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
201 iomsg->msg.msg_name = &iomsg->addr;
202 iomsg->free_iov = iomsg->fast_iov;
203 ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
205 /* save msg_control as sys_sendmsg() overwrites it */
206 sr->msg_control = iomsg->msg.msg_control;
210 int io_send_prep_async(struct io_kiocb *req)
212 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
213 struct io_async_msghdr *io;
216 if (!zc->addr || req_has_async_data(req))
218 io = io_msg_alloc_async_prep(req);
221 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
225 static int io_setup_async_addr(struct io_kiocb *req,
226 struct sockaddr_storage *addr_storage,
227 unsigned int issue_flags)
229 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
230 struct io_async_msghdr *io;
232 if (!sr->addr || req_has_async_data(req))
234 io = io_msg_alloc_async(req, issue_flags);
237 memcpy(&io->addr, addr_storage, sizeof(io->addr));
241 int io_sendmsg_prep_async(struct io_kiocb *req)
245 if (!io_msg_alloc_async_prep(req))
247 ret = io_sendmsg_copy_hdr(req, req->async_data);
249 req->flags |= REQ_F_NEED_CLEANUP;
253 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
255 struct io_async_msghdr *io = req->async_data;
260 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
262 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
264 if (req->opcode == IORING_OP_SEND) {
265 if (READ_ONCE(sqe->__pad3[0]))
267 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
268 sr->addr_len = READ_ONCE(sqe->addr_len);
269 } else if (sqe->addr2 || sqe->file_index) {
273 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
274 sr->len = READ_ONCE(sqe->len);
275 sr->flags = READ_ONCE(sqe->ioprio);
276 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
278 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
279 if (sr->msg_flags & MSG_DONTWAIT)
280 req->flags |= REQ_F_NOWAIT;
283 if (req->ctx->compat)
284 sr->msg_flags |= MSG_CMSG_COMPAT;
290 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
292 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
293 struct io_async_msghdr iomsg, *kmsg;
299 sock = sock_from_file(req->file);
303 if (req_has_async_data(req)) {
304 kmsg = req->async_data;
305 kmsg->msg.msg_control = sr->msg_control;
307 ret = io_sendmsg_copy_hdr(req, &iomsg);
313 if (!(req->flags & REQ_F_POLLED) &&
314 (sr->flags & IORING_RECVSEND_POLL_FIRST))
315 return io_setup_async_msg(req, kmsg, issue_flags);
317 flags = sr->msg_flags;
318 if (issue_flags & IO_URING_F_NONBLOCK)
319 flags |= MSG_DONTWAIT;
320 if (flags & MSG_WAITALL)
321 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
323 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
326 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
327 return io_setup_async_msg(req, kmsg, issue_flags);
328 if (ret > 0 && io_net_retry(sock, flags)) {
330 req->flags |= REQ_F_PARTIAL_IO;
331 return io_setup_async_msg(req, kmsg, issue_flags);
333 if (ret == -ERESTARTSYS)
337 /* fast path, check for non-NULL to avoid function call */
339 kfree(kmsg->free_iov);
340 req->flags &= ~REQ_F_NEED_CLEANUP;
341 io_netmsg_recycle(req, issue_flags);
344 else if (sr->done_io)
346 io_req_set_res(req, ret, 0);
350 int io_send(struct io_kiocb *req, unsigned int issue_flags)
352 struct sockaddr_storage __address;
353 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
361 msg.msg_control = NULL;
362 msg.msg_controllen = 0;
367 if (req_has_async_data(req)) {
368 struct io_async_msghdr *io = req->async_data;
370 msg.msg_name = &io->addr;
372 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
373 if (unlikely(ret < 0))
375 msg.msg_name = (struct sockaddr *)&__address;
377 msg.msg_namelen = sr->addr_len;
380 if (!(req->flags & REQ_F_POLLED) &&
381 (sr->flags & IORING_RECVSEND_POLL_FIRST))
382 return io_setup_async_addr(req, &__address, issue_flags);
384 sock = sock_from_file(req->file);
388 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
392 flags = sr->msg_flags;
393 if (issue_flags & IO_URING_F_NONBLOCK)
394 flags |= MSG_DONTWAIT;
395 if (flags & MSG_WAITALL)
396 min_ret = iov_iter_count(&msg.msg_iter);
398 msg.msg_flags = flags;
399 ret = sock_sendmsg(sock, &msg);
401 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
402 return io_setup_async_addr(req, &__address, issue_flags);
404 if (ret > 0 && io_net_retry(sock, flags)) {
408 req->flags |= REQ_F_PARTIAL_IO;
409 return io_setup_async_addr(req, &__address, issue_flags);
411 if (ret == -ERESTARTSYS)
417 else if (sr->done_io)
419 io_req_set_res(req, ret, 0);
423 static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
427 if (iomsg->namelen < 0)
429 if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
430 iomsg->namelen, &hdr))
432 if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
438 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
439 struct io_async_msghdr *iomsg)
441 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
442 struct user_msghdr msg;
445 if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
448 ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
452 if (req->flags & REQ_F_BUFFER_SELECT) {
453 if (msg.msg_iovlen == 0) {
454 sr->len = iomsg->fast_iov[0].iov_len = 0;
455 iomsg->fast_iov[0].iov_base = NULL;
456 iomsg->free_iov = NULL;
457 } else if (msg.msg_iovlen > 1) {
460 if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
462 sr->len = iomsg->fast_iov[0].iov_len;
463 iomsg->free_iov = NULL;
466 if (req->flags & REQ_F_APOLL_MULTISHOT) {
467 iomsg->namelen = msg.msg_namelen;
468 iomsg->controllen = msg.msg_controllen;
469 if (io_recvmsg_multishot_overflow(iomsg))
473 iomsg->free_iov = iomsg->fast_iov;
474 ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
475 &iomsg->free_iov, &iomsg->msg.msg_iter,
485 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
486 struct io_async_msghdr *iomsg)
488 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
489 struct compat_msghdr msg;
490 struct compat_iovec __user *uiov;
493 if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
496 ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
500 uiov = compat_ptr(msg.msg_iov);
501 if (req->flags & REQ_F_BUFFER_SELECT) {
504 iomsg->free_iov = NULL;
505 if (msg.msg_iovlen == 0) {
507 } else if (msg.msg_iovlen > 1) {
510 if (!access_ok(uiov, sizeof(*uiov)))
512 if (__get_user(clen, &uiov->iov_len))
519 if (req->flags & REQ_F_APOLL_MULTISHOT) {
520 iomsg->namelen = msg.msg_namelen;
521 iomsg->controllen = msg.msg_controllen;
522 if (io_recvmsg_multishot_overflow(iomsg))
526 iomsg->free_iov = iomsg->fast_iov;
527 ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
528 UIO_FASTIOV, &iomsg->free_iov,
529 &iomsg->msg.msg_iter, true);
538 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
539 struct io_async_msghdr *iomsg)
541 iomsg->msg.msg_name = &iomsg->addr;
544 if (req->ctx->compat)
545 return __io_compat_recvmsg_copy_hdr(req, iomsg);
548 return __io_recvmsg_copy_hdr(req, iomsg);
551 int io_recvmsg_prep_async(struct io_kiocb *req)
555 if (!io_msg_alloc_async_prep(req))
557 ret = io_recvmsg_copy_hdr(req, req->async_data);
559 req->flags |= REQ_F_NEED_CLEANUP;
563 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
565 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
567 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
569 if (unlikely(sqe->file_index || sqe->addr2))
572 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
573 sr->len = READ_ONCE(sqe->len);
574 sr->flags = READ_ONCE(sqe->ioprio);
575 if (sr->flags & ~(RECVMSG_FLAGS))
577 sr->msg_flags = READ_ONCE(sqe->msg_flags);
578 if (sr->msg_flags & MSG_DONTWAIT)
579 req->flags |= REQ_F_NOWAIT;
580 if (sr->msg_flags & MSG_ERRQUEUE)
581 req->flags |= REQ_F_CLEAR_POLLIN;
582 if (sr->flags & IORING_RECV_MULTISHOT) {
583 if (!(req->flags & REQ_F_BUFFER_SELECT))
585 if (sr->msg_flags & MSG_WAITALL)
587 if (req->opcode == IORING_OP_RECV && sr->len)
589 req->flags |= REQ_F_APOLL_MULTISHOT;
591 * Store the buffer group for this multishot receive separately,
592 * as if we end up doing an io-wq based issue that selects a
593 * buffer, it has to be committed immediately and that will
594 * clear ->buf_list. This means we lose the link to the buffer
595 * list, and the eventual buffer put on completion then cannot
598 sr->buf_group = req->buf_index;
602 if (req->ctx->compat)
603 sr->msg_flags |= MSG_CMSG_COMPAT;
609 static inline void io_recv_prep_retry(struct io_kiocb *req)
611 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
614 sr->len = 0; /* get from the provided buffer */
615 req->buf_index = sr->buf_group;
619 * Finishes io_recv and io_recvmsg.
621 * Returns true if it is actually finished, or false if it should run
622 * again (for multishot).
624 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
625 unsigned int cflags, bool mshot_finished,
626 unsigned issue_flags)
628 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
629 io_req_set_res(req, *ret, cflags);
634 if (!mshot_finished) {
635 if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
636 req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
637 io_recv_prep_retry(req);
640 /* Otherwise stop multishot but use the current result. */
643 io_req_set_res(req, *ret, cflags);
645 if (issue_flags & IO_URING_F_MULTISHOT)
646 *ret = IOU_STOP_MULTISHOT;
652 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
653 struct io_sr_msg *sr, void __user **buf,
656 unsigned long ubuf = (unsigned long) *buf;
659 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
664 if (kmsg->controllen) {
665 unsigned long control = ubuf + hdr - kmsg->controllen;
667 kmsg->msg.msg_control_user = (void __user *) control;
668 kmsg->msg.msg_controllen = kmsg->controllen;
671 sr->buf = *buf; /* stash for later copy */
672 *buf = (void __user *) (ubuf + hdr);
673 kmsg->payloadlen = *len = *len - hdr;
677 struct io_recvmsg_multishot_hdr {
678 struct io_uring_recvmsg_out msg;
679 struct sockaddr_storage addr;
682 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
683 struct io_async_msghdr *kmsg,
684 unsigned int flags, bool *finished)
688 struct io_recvmsg_multishot_hdr hdr;
691 kmsg->msg.msg_name = &hdr.addr;
692 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
693 kmsg->msg.msg_namelen = 0;
695 if (sock->file->f_flags & O_NONBLOCK)
696 flags |= MSG_DONTWAIT;
698 err = sock_recvmsg(sock, &kmsg->msg, flags);
699 *finished = err <= 0;
703 hdr.msg = (struct io_uring_recvmsg_out) {
704 .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
705 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
708 hdr.msg.payloadlen = err;
709 if (err > kmsg->payloadlen)
710 err = kmsg->payloadlen;
712 copy_len = sizeof(struct io_uring_recvmsg_out);
713 if (kmsg->msg.msg_namelen > kmsg->namelen)
714 copy_len += kmsg->namelen;
716 copy_len += kmsg->msg.msg_namelen;
719 * "fromlen shall refer to the value before truncation.."
722 hdr.msg.namelen = kmsg->msg.msg_namelen;
724 /* ensure that there is no gap between hdr and sockaddr_storage */
725 BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
726 sizeof(struct io_uring_recvmsg_out));
727 if (copy_to_user(io->buf, &hdr, copy_len)) {
732 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
733 kmsg->controllen + err;
736 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
738 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
739 struct io_async_msghdr iomsg, *kmsg;
743 int ret, min_ret = 0;
744 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
745 bool mshot_finished = true;
747 sock = sock_from_file(req->file);
751 if (req_has_async_data(req)) {
752 kmsg = req->async_data;
754 ret = io_recvmsg_copy_hdr(req, &iomsg);
760 if (!(req->flags & REQ_F_POLLED) &&
761 (sr->flags & IORING_RECVSEND_POLL_FIRST))
762 return io_setup_async_msg(req, kmsg, issue_flags);
764 if (!io_check_multishot(req, issue_flags))
765 return io_setup_async_msg(req, kmsg, issue_flags);
768 if (io_do_buffer_select(req)) {
770 size_t len = sr->len;
772 buf = io_buffer_select(req, &len, issue_flags);
776 if (req->flags & REQ_F_APOLL_MULTISHOT) {
777 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
779 io_kbuf_recycle(req, issue_flags);
784 iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
787 flags = sr->msg_flags;
789 flags |= MSG_DONTWAIT;
790 if (flags & MSG_WAITALL)
791 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
793 kmsg->msg.msg_get_inq = 1;
794 if (req->flags & REQ_F_APOLL_MULTISHOT)
795 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
798 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
802 if (ret == -EAGAIN && force_nonblock) {
803 ret = io_setup_async_msg(req, kmsg, issue_flags);
804 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
805 io_kbuf_recycle(req, issue_flags);
806 return IOU_ISSUE_SKIP_COMPLETE;
810 if (ret > 0 && io_net_retry(sock, flags)) {
812 req->flags |= REQ_F_PARTIAL_IO;
813 return io_setup_async_msg(req, kmsg, issue_flags);
815 if (ret == -ERESTARTSYS)
818 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
824 else if (sr->done_io)
827 io_kbuf_recycle(req, issue_flags);
829 cflags = io_put_kbuf(req, issue_flags);
830 if (kmsg->msg.msg_inq)
831 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
833 if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
834 goto retry_multishot;
836 if (mshot_finished) {
837 /* fast path, check for non-NULL to avoid function call */
839 kfree(kmsg->free_iov);
840 io_netmsg_recycle(req, issue_flags);
841 req->flags &= ~REQ_F_NEED_CLEANUP;
847 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
849 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
854 int ret, min_ret = 0;
855 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
856 size_t len = sr->len;
858 if (!(req->flags & REQ_F_POLLED) &&
859 (sr->flags & IORING_RECVSEND_POLL_FIRST))
862 if (!io_check_multishot(req, issue_flags))
865 sock = sock_from_file(req->file);
870 if (io_do_buffer_select(req)) {
873 buf = io_buffer_select(req, &len, issue_flags);
879 ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
885 msg.msg_control = NULL;
888 msg.msg_controllen = 0;
892 flags = sr->msg_flags;
894 flags |= MSG_DONTWAIT;
895 if (flags & MSG_WAITALL)
896 min_ret = iov_iter_count(&msg.msg_iter);
898 ret = sock_recvmsg(sock, &msg, flags);
900 if (ret == -EAGAIN && force_nonblock) {
901 if (issue_flags & IO_URING_F_MULTISHOT) {
902 io_kbuf_recycle(req, issue_flags);
903 return IOU_ISSUE_SKIP_COMPLETE;
908 if (ret > 0 && io_net_retry(sock, flags)) {
912 req->flags |= REQ_F_PARTIAL_IO;
915 if (ret == -ERESTARTSYS)
918 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
925 else if (sr->done_io)
928 io_kbuf_recycle(req, issue_flags);
930 cflags = io_put_kbuf(req, issue_flags);
932 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
934 if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
935 goto retry_multishot;
940 void io_send_zc_cleanup(struct io_kiocb *req)
942 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
943 struct io_async_msghdr *io;
945 if (req_has_async_data(req)) {
946 io = req->async_data;
947 /* might be ->fast_iov if *msg_copy_hdr failed */
948 if (io->free_iov != io->fast_iov)
952 io_notif_flush(zc->notif);
957 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
958 #define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
960 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
962 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
963 struct io_ring_ctx *ctx = req->ctx;
964 struct io_kiocb *notif;
966 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
968 /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
969 if (req->flags & REQ_F_CQE_SKIP)
972 notif = zc->notif = io_alloc_notif(ctx);
975 notif->cqe.user_data = req->cqe.user_data;
977 notif->cqe.flags = IORING_CQE_F_NOTIF;
978 req->flags |= REQ_F_NEED_CLEANUP;
980 zc->flags = READ_ONCE(sqe->ioprio);
981 if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
982 if (zc->flags & ~IO_ZC_FLAGS_VALID)
984 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
985 io_notif_set_extended(notif);
986 io_notif_to_data(notif)->zc_report = true;
990 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
991 unsigned idx = READ_ONCE(sqe->buf_index);
993 if (unlikely(idx >= ctx->nr_user_bufs))
995 idx = array_index_nospec(idx, ctx->nr_user_bufs);
996 req->imu = READ_ONCE(ctx->user_bufs[idx]);
997 io_req_set_rsrc_node(notif, ctx, 0);
1000 if (req->opcode == IORING_OP_SEND_ZC) {
1001 if (READ_ONCE(sqe->__pad3[0]))
1003 zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1004 zc->addr_len = READ_ONCE(sqe->addr_len);
1006 if (unlikely(sqe->addr2 || sqe->file_index))
1008 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1012 zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1013 zc->len = READ_ONCE(sqe->len);
1014 zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1015 if (zc->msg_flags & MSG_DONTWAIT)
1016 req->flags |= REQ_F_NOWAIT;
1020 #ifdef CONFIG_COMPAT
1021 if (req->ctx->compat)
1022 zc->msg_flags |= MSG_CMSG_COMPAT;
1027 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1028 struct iov_iter *from, size_t length)
1030 skb_zcopy_downgrade_managed(skb);
1031 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1034 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1035 struct iov_iter *from, size_t length)
1037 struct skb_shared_info *shinfo = skb_shinfo(skb);
1038 int frag = shinfo->nr_frags;
1040 struct bvec_iter bi;
1042 unsigned long truesize = 0;
1045 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1046 else if (unlikely(!skb_zcopy_managed(skb)))
1047 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1049 bi.bi_size = min(from->count, length);
1050 bi.bi_bvec_done = from->iov_offset;
1053 while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1054 struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1057 truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1058 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1059 v.bv_offset, v.bv_len);
1060 bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1065 shinfo->nr_frags = frag;
1066 from->bvec += bi.bi_idx;
1067 from->nr_segs -= bi.bi_idx;
1068 from->count -= copied;
1069 from->iov_offset = bi.bi_bvec_done;
1071 skb->data_len += copied;
1073 skb->truesize += truesize;
1075 if (sk && sk->sk_type == SOCK_STREAM) {
1076 sk_wmem_queued_add(sk, truesize);
1077 if (!skb_zcopy_pure(skb))
1078 sk_mem_charge(sk, truesize);
1080 refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1085 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1087 struct sockaddr_storage __address;
1088 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1090 struct socket *sock;
1092 int ret, min_ret = 0;
1094 sock = sock_from_file(req->file);
1095 if (unlikely(!sock))
1097 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1100 msg.msg_name = NULL;
1101 msg.msg_control = NULL;
1102 msg.msg_controllen = 0;
1103 msg.msg_namelen = 0;
1106 if (req_has_async_data(req)) {
1107 struct io_async_msghdr *io = req->async_data;
1109 msg.msg_name = &io->addr;
1111 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1112 if (unlikely(ret < 0))
1114 msg.msg_name = (struct sockaddr *)&__address;
1116 msg.msg_namelen = zc->addr_len;
1119 if (!(req->flags & REQ_F_POLLED) &&
1120 (zc->flags & IORING_RECVSEND_POLL_FIRST))
1121 return io_setup_async_addr(req, &__address, issue_flags);
1123 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1124 ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1125 (u64)(uintptr_t)zc->buf, zc->len);
1128 msg.sg_from_iter = io_sg_from_iter;
1130 io_notif_set_extended(zc->notif);
1131 ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1134 ret = io_notif_account_mem(zc->notif, zc->len);
1137 msg.sg_from_iter = io_sg_from_iter_iovec;
1140 msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1141 if (issue_flags & IO_URING_F_NONBLOCK)
1142 msg_flags |= MSG_DONTWAIT;
1143 if (msg_flags & MSG_WAITALL)
1144 min_ret = iov_iter_count(&msg.msg_iter);
1146 msg.msg_flags = msg_flags;
1147 msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1148 ret = sock_sendmsg(sock, &msg);
1150 if (unlikely(ret < min_ret)) {
1151 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1152 return io_setup_async_addr(req, &__address, issue_flags);
1154 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1158 req->flags |= REQ_F_PARTIAL_IO;
1159 return io_setup_async_addr(req, &__address, issue_flags);
1161 if (ret == -ERESTARTSYS)
1168 else if (zc->done_io)
1172 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1173 * flushing notif to io_send_zc_cleanup()
1175 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1176 io_notif_flush(zc->notif);
1177 req->flags &= ~REQ_F_NEED_CLEANUP;
1179 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1183 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1185 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1186 struct io_async_msghdr iomsg, *kmsg;
1187 struct socket *sock;
1189 int ret, min_ret = 0;
1191 io_notif_set_extended(sr->notif);
1193 sock = sock_from_file(req->file);
1194 if (unlikely(!sock))
1196 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1199 if (req_has_async_data(req)) {
1200 kmsg = req->async_data;
1202 ret = io_sendmsg_copy_hdr(req, &iomsg);
1208 if (!(req->flags & REQ_F_POLLED) &&
1209 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1210 return io_setup_async_msg(req, kmsg, issue_flags);
1212 flags = sr->msg_flags | MSG_ZEROCOPY;
1213 if (issue_flags & IO_URING_F_NONBLOCK)
1214 flags |= MSG_DONTWAIT;
1215 if (flags & MSG_WAITALL)
1216 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1218 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1219 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1220 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1222 if (unlikely(ret < min_ret)) {
1223 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1224 return io_setup_async_msg(req, kmsg, issue_flags);
1226 if (ret > 0 && io_net_retry(sock, flags)) {
1228 req->flags |= REQ_F_PARTIAL_IO;
1229 return io_setup_async_msg(req, kmsg, issue_flags);
1231 if (ret == -ERESTARTSYS)
1235 /* fast path, check for non-NULL to avoid function call */
1236 if (kmsg->free_iov) {
1237 kfree(kmsg->free_iov);
1238 kmsg->free_iov = NULL;
1241 io_netmsg_recycle(req, issue_flags);
1244 else if (sr->done_io)
1248 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1249 * flushing notif to io_send_zc_cleanup()
1251 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1252 io_notif_flush(sr->notif);
1253 req->flags &= ~REQ_F_NEED_CLEANUP;
1255 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1259 void io_sendrecv_fail(struct io_kiocb *req)
1261 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1263 if (req->flags & REQ_F_PARTIAL_IO)
1264 req->cqe.res = sr->done_io;
1266 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1267 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1268 req->cqe.flags |= IORING_CQE_F_MORE;
1271 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1273 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1276 if (sqe->len || sqe->buf_index)
1279 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1280 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1281 accept->flags = READ_ONCE(sqe->accept_flags);
1282 accept->nofile = rlimit(RLIMIT_NOFILE);
1283 flags = READ_ONCE(sqe->ioprio);
1284 if (flags & ~IORING_ACCEPT_MULTISHOT)
1287 accept->file_slot = READ_ONCE(sqe->file_index);
1288 if (accept->file_slot) {
1289 if (accept->flags & SOCK_CLOEXEC)
1291 if (flags & IORING_ACCEPT_MULTISHOT &&
1292 accept->file_slot != IORING_FILE_INDEX_ALLOC)
1295 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1297 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1298 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1299 if (flags & IORING_ACCEPT_MULTISHOT)
1300 req->flags |= REQ_F_APOLL_MULTISHOT;
1304 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1306 struct io_ring_ctx *ctx = req->ctx;
1307 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1308 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1309 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1310 bool fixed = !!accept->file_slot;
1314 if (!io_check_multishot(req, issue_flags))
1318 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1319 if (unlikely(fd < 0))
1322 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1327 ret = PTR_ERR(file);
1328 if (ret == -EAGAIN && force_nonblock) {
1330 * if it's multishot and polled, we don't need to
1331 * return EAGAIN to arm the poll infra since it
1332 * has already been done
1334 if (issue_flags & IO_URING_F_MULTISHOT)
1335 ret = IOU_ISSUE_SKIP_COMPLETE;
1338 if (ret == -ERESTARTSYS)
1341 } else if (!fixed) {
1342 fd_install(fd, file);
1345 ret = io_fixed_fd_install(req, issue_flags, file,
1349 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1350 io_req_set_res(req, ret, 0);
1356 if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1357 req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
1363 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1365 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1367 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1370 sock->domain = READ_ONCE(sqe->fd);
1371 sock->type = READ_ONCE(sqe->off);
1372 sock->protocol = READ_ONCE(sqe->len);
1373 sock->file_slot = READ_ONCE(sqe->file_index);
1374 sock->nofile = rlimit(RLIMIT_NOFILE);
1376 sock->flags = sock->type & ~SOCK_TYPE_MASK;
1377 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1379 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1384 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1386 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1387 bool fixed = !!sock->file_slot;
1392 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1393 if (unlikely(fd < 0))
1396 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1400 ret = PTR_ERR(file);
1401 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1403 if (ret == -ERESTARTSYS)
1406 } else if (!fixed) {
1407 fd_install(fd, file);
1410 ret = io_fixed_fd_install(req, issue_flags, file,
1413 io_req_set_res(req, ret, 0);
1417 int io_connect_prep_async(struct io_kiocb *req)
1419 struct io_async_connect *io = req->async_data;
1420 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1422 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1425 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1427 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1429 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1432 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1433 conn->addr_len = READ_ONCE(sqe->addr2);
1434 conn->in_progress = conn->seen_econnaborted = false;
1438 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1440 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1441 struct io_async_connect __io, *io;
1442 unsigned file_flags;
1444 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1446 if (connect->in_progress) {
1447 struct socket *socket;
1450 socket = sock_from_file(req->file);
1452 ret = sock_error(socket->sk);
1456 if (req_has_async_data(req)) {
1457 io = req->async_data;
1459 ret = move_addr_to_kernel(connect->addr,
1467 file_flags = force_nonblock ? O_NONBLOCK : 0;
1469 ret = __sys_connect_file(req->file, &io->address,
1470 connect->addr_len, file_flags);
1471 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1472 && force_nonblock) {
1473 if (ret == -EINPROGRESS) {
1474 connect->in_progress = true;
1477 if (ret == -ECONNABORTED) {
1478 if (connect->seen_econnaborted)
1480 connect->seen_econnaborted = true;
1482 if (req_has_async_data(req))
1484 if (io_alloc_async_data(req)) {
1488 memcpy(req->async_data, &__io, sizeof(__io));
1491 if (ret == -ERESTARTSYS)
1496 io_req_set_res(req, ret, 0);
1500 void io_netmsg_cache_free(struct io_cache_entry *entry)
1502 kfree(container_of(entry, struct io_async_msghdr, cache));