// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
#include "alloc_cache.h"
#if defined(CONFIG_NET)
struct sockaddr __user *addr;
struct sockaddr __user *addr;
struct compat_msghdr __user *umsg_compat;
struct user_msghdr __user *umsg;
/* initialised and used only by !msg send variants */
/* used only for send zerocopy */
struct io_kiocb *notif;
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
sqe->buf_index || sqe->splice_fd_in))
shutdown->how = READ_ONCE(sqe->len);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
if (issue_flags & IO_URING_F_NONBLOCK)
sock = sock_from_file(req->file);
ret = __sys_shutdown_sock(sock, shutdown->how);
io_req_set_res(req, ret, 0);
static bool io_net_retry(struct socket *sock, int flags)
if (!(flags & MSG_WAITALL))
return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
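/*
 * Note: only stream-like sockets (SOCK_STREAM, SOCK_SEQPACKET) are retried
 * after a partial MSG_WAITALL transfer; for datagram sockets a short result
 * is already a complete message, so retrying could splice data across
 * message boundaries.
 */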
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
struct io_async_msghdr *hdr = req->async_data;
if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
/* Let normal cleanup path reap it if we fail adding to the cache */
if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
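/*
 * Once the async msghdr has been handed to the per-ring netmsg_cache the
 * request no longer owns it, so the async-data reference is dropped here;
 * a failed cache insert leaves it attached for the regular cleanup path.
 */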
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
unsigned int issue_flags)
struct io_ring_ctx *ctx = req->ctx;
struct io_cache_entry *entry;
struct io_async_msghdr *hdr;
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
entry = io_alloc_cache_get(&ctx->netmsg_cache);
hdr = container_of(entry, struct io_async_msghdr, cache);
hdr->free_iov = NULL;
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = hdr;
if (!io_alloc_async_data(req)) {
hdr = req->async_data;
hdr->free_iov = NULL;
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
/* ->prep_async is always called from the submission context */
return io_msg_alloc_async(req, 0);
static int io_setup_async_msg(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
unsigned int issue_flags)
struct io_async_msghdr *async_msg;
if (req_has_async_data(req))
async_msg = io_msg_alloc_async(req, issue_flags);
kfree(kmsg->free_iov);
req->flags |= REQ_F_NEED_CLEANUP;
memcpy(async_msg, kmsg, sizeof(*kmsg));
if (async_msg->msg.msg_name)
async_msg->msg.msg_name = &async_msg->addr;
/* if we're using fast_iov, set it to the new one */
if (!kmsg->free_iov) {
size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
iomsg->msg.msg_name = &iomsg->addr;
iomsg->free_iov = iomsg->fast_iov;
return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
int io_send_prep_async(struct io_kiocb *req)
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
if (!zc->addr || req_has_async_data(req))
io = io_msg_alloc_async_prep(req);
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
static int io_setup_async_addr(struct io_kiocb *req,
struct sockaddr_storage *addr_storage,
unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
if (!sr->addr || req_has_async_data(req))
io = io_msg_alloc_async(req, issue_flags);
memcpy(&io->addr, addr_storage, sizeof(io->addr));
int io_sendmsg_prep_async(struct io_kiocb *req)
if (!io_msg_alloc_async_prep(req))
ret = io_sendmsg_copy_hdr(req, req->async_data);
req->flags |= REQ_F_NEED_CLEANUP;
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
struct io_async_msghdr *io = req->async_data;
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (req->opcode == IORING_OP_SEND) {
if (READ_ONCE(sqe->__pad3[0]))
sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
sr->addr_len = READ_ONCE(sqe->addr_len);
} else if (sqe->addr2 || sqe->file_index) {
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
sock = sock_from_file(req->file);
if (req_has_async_data(req)) {
kmsg = req->async_data;
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret == -ERESTARTSYS)
/* fast path, check for non-NULL to avoid function call */
kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_netmsg_recycle(req, issue_flags);
else if (sr->done_io)
io_req_set_res(req, ret, 0);
int io_send(struct io_kiocb *req, unsigned int issue_flags)
struct sockaddr_storage __address;
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
msg.msg_control = NULL;
msg.msg_controllen = 0;
if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data;
msg.msg_name = &io->addr;
ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
if (unlikely(ret < 0))
msg.msg_name = (struct sockaddr *)&__address;
msg.msg_namelen = sr->addr_len;
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_addr(req, &__address, issue_flags);
sock = sock_from_file(req->file);
ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
msg.msg_flags = flags;
ret = sock_sendmsg(sock, &msg);
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_addr(req, &__address, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_addr(req, &__address, issue_flags);
if (ret == -ERESTARTSYS)
else if (sr->done_io)
io_req_set_res(req, ret, 0);
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
if (iomsg->namelen < 0)
if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
iomsg->namelen, &hdr))
if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
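/*
 * The multishot header (struct io_uring_recvmsg_out), the address and the
 * control data all go into the front of the selected buffer, so their
 * combined size must fit in a signed int; any overflow rejects the request
 * rather than corrupting the buffer layout.
 */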
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct user_msghdr msg;
if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
if (req->flags & REQ_F_BUFFER_SELECT) {
if (msg.msg_iovlen == 0) {
sr->len = iomsg->fast_iov[0].iov_len = 0;
iomsg->fast_iov[0].iov_base = NULL;
iomsg->free_iov = NULL;
} else if (msg.msg_iovlen > 1) {
if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
sr->len = iomsg->fast_iov[0].iov_len;
iomsg->free_iov = NULL;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
iomsg->namelen = msg.msg_namelen;
iomsg->controllen = msg.msg_controllen;
if (io_recvmsg_multishot_overflow(iomsg))
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter,
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct compat_msghdr msg;
struct compat_iovec __user *uiov;
if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
uiov = compat_ptr(msg.msg_iov);
if (req->flags & REQ_F_BUFFER_SELECT) {
iomsg->free_iov = NULL;
if (msg.msg_iovlen == 0) {
} else if (msg.msg_iovlen > 1) {
if (!access_ok(uiov, sizeof(*uiov)))
if (__get_user(clen, &uiov->iov_len))
if (req->flags & REQ_F_APOLL_MULTISHOT) {
iomsg->namelen = msg.msg_namelen;
iomsg->controllen = msg.msg_controllen;
if (io_recvmsg_multishot_overflow(iomsg))
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
UIO_FASTIOV, &iomsg->free_iov,
&iomsg->msg.msg_iter, true);
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
iomsg->msg.msg_name = &iomsg->addr;
if (req->ctx->compat)
return __io_compat_recvmsg_copy_hdr(req, iomsg);
return __io_recvmsg_copy_hdr(req, iomsg);
int io_recvmsg_prep_async(struct io_kiocb *req)
if (!io_msg_alloc_async_prep(req))
ret = io_recvmsg_copy_hdr(req, req->async_data);
req->flags |= REQ_F_NEED_CLEANUP;
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (unlikely(sqe->file_index || sqe->addr2))
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~(RECVMSG_FLAGS))
sr->msg_flags = READ_ONCE(sqe->msg_flags);
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
if (sr->msg_flags & MSG_ERRQUEUE)
req->flags |= REQ_F_CLEAR_POLLIN;
if (sr->flags & IORING_RECV_MULTISHOT) {
if (!(req->flags & REQ_F_BUFFER_SELECT))
if (sr->msg_flags & MSG_WAITALL)
if (req->opcode == IORING_OP_RECV && sr->len)
req->flags |= REQ_F_APOLL_MULTISHOT;
* Store the buffer group for this multishot receive separately,
* as if we end up doing an io-wq based issue that selects a
* buffer, it has to be committed immediately and that will
* clear ->buf_list. This means we lose the link to the buffer
* list, and the eventual buffer put on completion then cannot
sr->buf_group = req->buf_index;
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
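/*
 * Illustrative userspace sketch (liburing-style, not part of this file):
 * a multishot receive requires provided buffers, so the SQE carries only
 * the buffer group id and a zero length.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;	/* provided-buffer group to pick from */
 *
 * Each arriving chunk of data then posts a CQE with IORING_CQE_F_BUFFER
 * set and IORING_CQE_F_MORE while the request stays armed.
 */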
static inline void io_recv_prep_retry(struct io_kiocb *req)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->len = 0; /* get from the provided buffer */
req->buf_index = sr->buf_group;
* Finishes io_recv and io_recvmsg.
* Returns true if it is actually finished, or false if it should run
* again (for multishot).
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
unsigned int cflags, bool mshot_finished,
unsigned issue_flags)
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
io_req_set_res(req, *ret, cflags);
if (!mshot_finished) {
if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
cflags | IORING_CQE_F_MORE, false)) {
io_recv_prep_retry(req);
* Otherwise stop multishot but use the current result.
* Probably will end up going into overflow, but this means
* we cannot trust the ordering anymore
io_req_set_res(req, *ret, cflags);
if (issue_flags & IO_URING_F_MULTISHOT)
*ret = IOU_STOP_MULTISHOT;
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
struct io_sr_msg *sr, void __user **buf,
unsigned long ubuf = (unsigned long) *buf;
hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
if (kmsg->controllen) {
unsigned long control = ubuf + hdr - kmsg->controllen;
kmsg->msg.msg_control_user = (void __user *) control;
kmsg->msg.msg_controllen = kmsg->controllen;
sr->buf = *buf; /* stash for later copy */
*buf = (void __user *) (ubuf + hdr);
kmsg->payloadlen = *len = *len - hdr;
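/*
 * Resulting layout inside the selected buffer (offsets from the start):
 *
 *	[struct io_uring_recvmsg_out][name (namelen)][control (controllen)][payload]
 *
 * Control data is placed so it ends exactly at hdr, and *buf/*len are
 * advanced past the header region so the socket payload lands after it.
 */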
struct io_recvmsg_multishot_hdr {
struct io_uring_recvmsg_out msg;
struct sockaddr_storage addr;
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
struct io_async_msghdr *kmsg,
unsigned int flags, bool *finished)
struct io_recvmsg_multishot_hdr hdr;
kmsg->msg.msg_name = &hdr.addr;
kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
kmsg->msg.msg_namelen = 0;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &kmsg->msg, flags);
*finished = err <= 0;
hdr.msg = (struct io_uring_recvmsg_out) {
.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
hdr.msg.payloadlen = err;
if (err > kmsg->payloadlen)
err = kmsg->payloadlen;
copy_len = sizeof(struct io_uring_recvmsg_out);
if (kmsg->msg.msg_namelen > kmsg->namelen)
copy_len += kmsg->namelen;
copy_len += kmsg->msg.msg_namelen;
* "fromlen shall refer to the value before truncation.."
hdr.msg.namelen = kmsg->msg.msg_namelen;
/* ensure that there is no gap between hdr and sockaddr_storage */
BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
sizeof(struct io_uring_recvmsg_out));
if (copy_to_user(io->buf, &hdr, copy_len)) {
return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
kmsg->controllen + err;
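/*
 * The value returned here (and later reported in the CQE) covers the whole
 * provided buffer that was consumed: the header plus the reserved name and
 * control regions plus the (possibly truncated) payload, not just the
 * socket payload length.
 */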
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
bool mshot_finished = true;
sock = sock_from_file(req->file);
if (req_has_async_data(req)) {
kmsg = req->async_data;
ret = io_recvmsg_copy_hdr(req, &iomsg);
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
if (io_do_buffer_select(req)) {
size_t len = sr->len;
buf = io_buffer_select(req, &len, issue_flags);
if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
io_kbuf_recycle(req, issue_flags);
kmsg->fast_iov[0].iov_base = buf;
kmsg->fast_iov[0].iov_len = len;
iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
flags = sr->msg_flags;
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
kmsg->msg.msg_get_inq = 1;
if (req->flags & REQ_F_APOLL_MULTISHOT)
ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
if (ret == -EAGAIN && force_nonblock) {
ret = io_setup_async_msg(req, kmsg, issue_flags);
if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
if (ret > 0 && io_net_retry(sock, flags)) {
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret == -ERESTARTSYS)
} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
else if (sr->done_io)
io_kbuf_recycle(req, issue_flags);
cflags = io_put_kbuf(req, issue_flags);
if (kmsg->msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
goto retry_multishot;
if (mshot_finished) {
/* fast path, check for non-NULL to avoid function call */
kfree(kmsg->free_iov);
io_netmsg_recycle(req, issue_flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
size_t len = sr->len;
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
sock = sock_from_file(req->file);
if (io_do_buffer_select(req)) {
buf = io_buffer_select(req, &len, issue_flags);
ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
msg.msg_control = NULL;
msg.msg_controllen = 0;
flags = sr->msg_flags;
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
ret = sock_recvmsg(sock, &msg, flags);
if (ret == -EAGAIN && force_nonblock) {
if (issue_flags & IO_URING_F_MULTISHOT) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
if (ret > 0 && io_net_retry(sock, flags)) {
req->flags |= REQ_F_PARTIAL_IO;
if (ret == -ERESTARTSYS)
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
else if (sr->done_io)
io_kbuf_recycle(req, issue_flags);
cflags = io_put_kbuf(req, issue_flags);
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
goto retry_multishot;
void io_send_zc_cleanup(struct io_kiocb *req)
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
if (req_has_async_data(req)) {
io = req->async_data;
/* might be ->fast_iov if *msg_copy_hdr failed */
if (io->free_iov != io->fast_iov)
io_notif_flush(zc->notif);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *notif;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
if (req->flags & REQ_F_CQE_SKIP)
zc->flags = READ_ONCE(sqe->ioprio);
if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
IORING_RECVSEND_FIXED_BUF |
IORING_SEND_ZC_REPORT_USAGE))
notif = zc->notif = io_alloc_notif(ctx);
notif->cqe.user_data = req->cqe.user_data;
notif->cqe.flags = IORING_CQE_F_NOTIF;
req->flags |= REQ_F_NEED_CLEANUP;
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index);
if (unlikely(idx >= ctx->nr_user_bufs))
idx = array_index_nospec(idx, ctx->nr_user_bufs);
req->imu = READ_ONCE(ctx->user_bufs[idx]);
io_req_set_rsrc_node(notif, ctx, 0);
if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
io_notif_to_data(notif)->zc_report = true;
if (req->opcode == IORING_OP_SEND_ZC) {
if (READ_ONCE(sqe->__pad3[0]))
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
zc->addr_len = READ_ONCE(sqe->addr_len);
if (unlikely(sqe->addr2 || sqe->file_index))
if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
if (req->ctx->compat)
zc->msg_flags |= MSG_CMSG_COMPAT;
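/*
 * Illustrative userspace sketch (liburing-style, not part of this file):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *
 * A zerocopy send normally produces two CQEs for the one SQE: the send
 * completion (with IORING_CQE_F_MORE set) and, once the kernel is done
 * with the user pages, a notification CQE carrying IORING_CQE_F_NOTIF.
 */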
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length)
skb_zcopy_downgrade_managed(skb);
return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length)
struct skb_shared_info *shinfo = skb_shinfo(skb);
int frag = shinfo->nr_frags;
struct bvec_iter bi;
unsigned long truesize = 0;
shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
else if (unlikely(!skb_zcopy_managed(skb)))
return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
bi.bi_size = min(from->count, length);
bi.bi_bvec_done = from->iov_offset;
while (bi.bi_size && frag < MAX_SKB_FRAGS) {
struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
v.bv_offset, v.bv_len);
bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
shinfo->nr_frags = frag;
from->bvec += bi.bi_idx;
from->nr_segs -= bi.bi_idx;
from->count -= copied;
from->iov_offset = bi.bi_bvec_done;
skb->data_len += copied;
skb->truesize += truesize;
if (sk && sk->sk_type == SOCK_STREAM) {
sk_wmem_queued_add(sk, truesize);
if (!skb_zcopy_pure(skb))
sk_mem_charge(sk, truesize);
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
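/*
 * The user pages are attached to the skb as frags without copying, so no
 * payload memory is allocated here; only truesize is accounted against the
 * socket's write memory to keep the usual send-buffer limits honest.
 */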
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
struct sockaddr_storage __address;
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct socket *sock;
int ret, min_ret = 0;
sock = sock_from_file(req->file);
if (unlikely(!sock))
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data;
msg.msg_name = &io->addr;
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
if (unlikely(ret < 0))
msg.msg_name = (struct sockaddr *)&__address;
msg.msg_namelen = zc->addr_len;
if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_addr(req, &__address, issue_flags);
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len);
msg.sg_from_iter = io_sg_from_iter;
ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
ret = io_notif_account_mem(zc->notif, zc->len);
msg.sg_from_iter = io_sg_from_iter_iovec;
msg_flags = zc->msg_flags | MSG_ZEROCOPY;
if (issue_flags & IO_URING_F_NONBLOCK)
msg_flags |= MSG_DONTWAIT;
if (msg_flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
msg.msg_flags = msg_flags;
msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
ret = sock_sendmsg(sock, &msg);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_addr(req, &__address, issue_flags);
if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_addr(req, &__address, issue_flags);
if (ret == -ERESTARTSYS)
else if (zc->done_io)
* If we're in io-wq we can't rely on tw ordering guarantees, defer
* flushing notif to io_send_zc_cleanup()
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(zc->notif);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, IORING_CQE_F_MORE);
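/*
 * The request completes with IORING_CQE_F_MORE so userspace knows a second
 * CQE is still outstanding: the IORING_CQE_F_NOTIF notification posted by
 * io_notif_flush() once the network stack has released the buffers.
 */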
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
int ret, min_ret = 0;
sock = sock_from_file(req->file);
if (unlikely(!sock))
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
if (req_has_async_data(req)) {
kmsg = req->async_data;
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
flags = sr->msg_flags | MSG_ZEROCOPY;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret == -ERESTARTSYS)
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov) {
kfree(kmsg->free_iov);
kmsg->free_iov = NULL;
io_netmsg_recycle(req, issue_flags);
else if (sr->done_io)
* If we're in io-wq we can't rely on tw ordering guarantees, defer
* flushing notif to io_send_zc_cleanup()
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(sr->notif);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, IORING_CQE_F_MORE);
void io_sendrecv_fail(struct io_kiocb *req)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (req->flags & REQ_F_PARTIAL_IO)
req->cqe.res = sr->done_io;
if ((req->flags & REQ_F_NEED_CLEANUP) &&
(req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
req->cqe.flags |= IORING_CQE_F_MORE;
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
if (sqe->len || sqe->buf_index)
accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
accept->flags = READ_ONCE(sqe->accept_flags);
accept->nofile = rlimit(RLIMIT_NOFILE);
flags = READ_ONCE(sqe->ioprio);
if (flags & ~IORING_ACCEPT_MULTISHOT)
accept->file_slot = READ_ONCE(sqe->file_index);
if (accept->file_slot) {
if (accept->flags & SOCK_CLOEXEC)
if (flags & IORING_ACCEPT_MULTISHOT &&
accept->file_slot != IORING_FILE_INDEX_ALLOC)
if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
if (flags & IORING_ACCEPT_MULTISHOT)
req->flags |= REQ_F_APOLL_MULTISHOT;
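/*
 * With IORING_ACCEPT_MULTISHOT the single SQE keeps accepting: every new
 * connection posts its own CQE with IORING_CQE_F_MORE set, and the request
 * only terminates, posting a final CQE without that flag, when an error
 * is hit.
 */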
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
struct io_ring_ctx *ctx = req->ctx;
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
bool fixed = !!accept->file_slot;
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
if (unlikely(fd < 0))
file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
ret = PTR_ERR(file);
if (ret == -EAGAIN && force_nonblock) {
* if it's multishot and polled, we don't need to
* return EAGAIN to arm the poll infra since it
* has already been done
if (issue_flags & IO_URING_F_MULTISHOT)
ret = IOU_ISSUE_SKIP_COMPLETE;
if (ret == -ERESTARTSYS)
} else if (!fixed) {
fd_install(fd, file);
ret = io_fixed_fd_install(req, issue_flags, file,
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
io_req_set_res(req, ret, 0);
io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
io_req_set_res(req, ret, 0);
return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
if (sqe->addr || sqe->rw_flags || sqe->buf_index)
sock->domain = READ_ONCE(sqe->fd);
sock->type = READ_ONCE(sqe->off);
sock->protocol = READ_ONCE(sqe->len);
sock->file_slot = READ_ONCE(sqe->file_index);
sock->nofile = rlimit(RLIMIT_NOFILE);
sock->flags = sock->type & ~SOCK_TYPE_MASK;
if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
bool fixed = !!sock->file_slot;
fd = __get_unused_fd_flags(sock->flags, sock->nofile);
if (unlikely(fd < 0))
file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
ret = PTR_ERR(file);
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
if (ret == -ERESTARTSYS)
} else if (!fixed) {
fd_install(fd, file);
ret = io_fixed_fd_install(req, issue_flags, file,
io_req_set_res(req, ret, 0);
int io_connect_prep_async(struct io_kiocb *req)
struct io_async_connect *io = req->async_data;
struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
conn->in_progress = false;
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
struct io_async_connect __io, *io;
unsigned file_flags;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (connect->in_progress) {
struct socket *socket;
socket = sock_from_file(req->file);
ret = sock_error(socket->sk);
if (req_has_async_data(req)) {
io = req->async_data;
ret = move_addr_to_kernel(connect->addr,
file_flags = force_nonblock ? O_NONBLOCK : 0;
ret = __sys_connect_file(req->file, &io->address,
connect->addr_len, file_flags);
if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
if (ret == -EINPROGRESS) {
connect->in_progress = true;
if (req_has_async_data(req))
if (io_alloc_async_data(req)) {
memcpy(req->async_data, &__io, sizeof(__io));
if (ret == -ERESTARTSYS)
io_req_set_res(req, ret, 0);
void io_netmsg_cache_free(struct io_cache_entry *entry)
kfree(container_of(entry, struct io_async_msghdr, cache));