// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
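/*
 * Per-opcode request state. Each struct below overlays the generic
 * io_kiocb command area and is retrieved with io_kiocb_to_cmd(); by
 * convention each starts with the struct file pointer.
 */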
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	void __user			*msg_control;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths, but multipoll may decide to post extra
	 * cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

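/*
 * IORING_OP_SHUTDOWN: prep validates that unused SQE fields are zero and
 * stashes the 'how' argument; issue is forced to io-wq (REQ_F_FORCE_ASYNC)
 * since shutdown may block.
 *
 * Illustrative userspace sketch (liburing-style, not part of this file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 */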
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

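/*
 * Recycle the async msghdr into the per-ring cache rather than freeing it,
 * so the next request issued with the ring lock held can reuse it without
 * another allocation.
 */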
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

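/*
 * Snapshot the in-flight msghdr state into the request's async_data so the
 * send/receive can be retried from io-wq or task work context. On success
 * this still returns -EAGAIN, so the core punts the request to async issue.
 */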
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we were using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

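/*
 * Shared prep for IORING_OP_SEND and IORING_OP_SENDMSG. For plain send,
 * addr2/addr_len may carry an optional destination address; for sendmsg
 * those fields must be unused. MSG_NOSIGNAL is always ORed in so a closed
 * peer yields -EPIPE rather than SIGPIPE.
 */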
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

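/*
 * IORING_OP_SEND: like io_sendmsg() above, but builds the msghdr on the
 * stack from a single user buffer, avoiding the user msghdr copy.
 */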
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

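/*
 * Copy the user msghdr into the kernel one. With provided buffers
 * (REQ_F_BUFFER_SELECT) at most one iovec is allowed, since the actual
 * data buffer is chosen at issue time from the buffer ring.
 */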
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

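/*
 * Illustrative userspace sketch for multishot receive (liburing-style,
 * not part of this file): one SQE keeps posting a CQE per received chunk
 * until the buffer group runs dry or an error occurs. BGID is a
 * placeholder for a registered provided-buffer group.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->buf_group = BGID;
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 */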
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * deduce what buffer group it was from.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (mshot_finished)
		goto finish;

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				*ret, cflags | IORING_CQE_F_MORE)) {
		io_recv_prep_retry(req);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
			return false;
		if (issue_flags & IO_URING_F_MULTISHOT)
			*ret = IOU_ISSUE_SKIP_COMPLETE;
		else
			*ret = -EAGAIN;
		return true;
	}

	/* Otherwise stop multishot but use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

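/*
 * For multishot recvmsg the selected buffer is laid out as: a
 * struct io_uring_recvmsg_out header, then the (possibly truncated)
 * source address, then control data, then the payload.
 * io_recvmsg_prep_multishot() above carves the header space out of the
 * buffer before the receive.
 */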
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

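/*
 * Zerocopy sends complete in two steps: the request CQE (flagged
 * IORING_CQE_F_MORE) and a later notification CQE (IORING_CQE_F_NOTIF)
 * once the kernel no longer references the user buffer. The notif
 * io_kiocb allocated at prep time carries that second CQE.
 */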
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

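/*
 * Illustrative userspace sketch (liburing-style, not part of this file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *	// expect two completions: the send result, then one with F_NOTIF
 */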
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

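/*
 * Issue side of IORING_OP_SEND_ZC. Fixed (registered) buffers go through
 * io_import_fixed() with page refs managed by io_sg_from_iter() above;
 * plain user buffers are accounted against the notif via
 * io_notif_account_mem() and handled by the generic
 * __zerocopy_sg_from_iter() path.
 */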
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

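/*
 * Illustrative userspace sketch for multishot accept (liburing-style,
 * not part of this file): one SQE keeps producing a CQE per accepted
 * connection until it is cancelled or fails.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
 */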
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				ret, IORING_CQE_F_MORE))
		goto retry;

	io_req_set_res(req, ret, 0);
	return IOU_STOP_MULTISHOT;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

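/*
 * Nonblocking connect may legitimately return -EINPROGRESS; the request
 * is then re-armed until the socket becomes writable and reissued, at
 * which point the handshake result is harvested below. -ECONNABORTED is
 * retried once, as it can be returned transiently.
 */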
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif