// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
	struct sockaddr __user		*addr;

	struct sockaddr __user		*addr;

	struct compat_msghdr __user	*umsg_compat;
	struct user_msghdr __user	*umsg;

	/* initialised and used only by !msg send variants */

	/* used only for send zerocopy */
	struct io_kiocb			*notif;

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
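/*
 * Each opcode below follows the usual io_uring split: a ->prep() helper that
 * validates the SQE fields at submission time, and an issue handler that does
 * the actual socket call and returns -EAGAIN when the operation should be
 * retried from an async context.
 */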
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
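/*
 * A short transfer is only worth retrying internally when the caller asked
 * for MSG_WAITALL and the socket preserves byte ordering across calls
 * (stream or seqpacket); otherwise the request completes with whatever a
 * single call returned.
 */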
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
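/*
 * Stash the async msghdr in the per-ring netmsg_cache so the next
 * send/recvmsg on this ring can reuse it instead of reallocating. Only done
 * when the ring is locked, i.e. not IO_URING_F_UNLOCKED.
 */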
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		hdr = container_of(entry, struct io_async_msghdr, cache);
		hdr->free_iov = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
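/*
 * Copy the on-stack msghdr into freshly allocated async data so the request
 * can be retried later (poll arm or io-wq). Pointers into the on-stack
 * fast_iov and addr are redirected to the async copy.
 */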
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}
int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
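/*
 * Issue path for IORING_OP_SENDMSG. On -EAGAIN in nonblocking context the
 * parsed msghdr is preserved via io_setup_async_msg(); a short send on a
 * retryable socket is accounted in sr->done_io and also goes the async
 * route so the remainder can be sent later.
 */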
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	sock = sock_from_file(req->file);
	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		ret = io_sendmsg_copy_hdr(req, &iomsg);
	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);
	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
		return io_setup_async_msg(req, kmsg, issue_flags);
	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
		return io_setup_async_msg(req, kmsg, issue_flags);
	if (ret == -ERESTARTSYS)
	/* fast path, check for non-NULL to avoid function call */
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	else if (sr->done_io)
	io_req_set_res(req, ret, 0);
int io_send(struct io_kiocb *req, unsigned int issue_flags)
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	if (req_has_async_data(req)) {
		struct io_async_msghdr *io = req->async_data;
		msg.msg_name = &io->addr;
		ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
		if (unlikely(ret < 0))
		msg.msg_name = (struct sockaddr *)&__address;
		msg.msg_namelen = sr->addr_len;
	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);
	sock = sock_from_file(req->file);
	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
		return io_setup_async_addr(req, &__address, issue_flags);
	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
		return io_setup_async_addr(req, &__address, issue_flags);
	if (ret == -ERESTARTSYS)
	else if (sr->done_io)
	io_req_set_res(req, ret, 0);
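/*
 * For multishot recvmsg the name and control areas are copied in front of
 * the payload inside the selected buffer, so their combined size must not
 * overflow an int.
 */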
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
	if (iomsg->namelen < 0)
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
		} else if (msg.msg_iovlen > 1) {
			if (!access_ok(uiov, sizeof(*uiov)))
			if (__get_user(clen, &uiov->iov_len))
		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
	iomsg->msg.msg_name = &iomsg->addr;
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
	return __io_recvmsg_copy_hdr(req, iomsg);
int io_recvmsg_prep_async(struct io_kiocb *req)
	if (!io_msg_alloc_async_prep(req))
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	req->flags |= REQ_F_NEED_CLEANUP;

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
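/*
 * IORING_RECV_MULTISHOT requires a provided buffer group (REQ_F_BUFFER_SELECT)
 * and is rejected together with MSG_WAITALL or an explicit length on
 * IORING_OP_RECV; each received message then posts its own CQE.
 */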
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	if (unlikely(sqe->file_index || sqe->addr2))
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
		if (sr->msg_flags & MSG_WAITALL)
		if (req->opcode == IORING_OP_RECV && sr->len)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	io_req_set_res(req, *ret, cflags);
	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
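/*
 * Multishot recvmsg lays the result out in the selected buffer roughly as:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control (controllen) | payload
 *
 * io_recvmsg_prep_multishot() reserves that header space and points the
 * user control pointer and the payload at the right offsets within *buf.
 */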
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
	unsigned long ubuf = (unsigned long) *buf;
	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;
		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
	struct io_recvmsg_multishot_hdr hdr;
	kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;
	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
		copy_len += kmsg->msg.msg_namelen;
	/*
	 * "fromlen shall refer to the value before truncation.."
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;
	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen + err;
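/*
 * Issue path for IORING_OP_RECVMSG, including the multishot variant: on a
 * successful multishot completion io_recv_finish() posts a CQE with
 * IORING_CQE_F_MORE and the handler loops back to pick a new provided
 * buffer; IORING_CQE_F_SOCK_NONEMPTY is set when more data is still queued.
 */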
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;
	sock = sock_from_file(req->file);
	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		ret = io_recvmsg_copy_hdr(req, &iomsg);
	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);
	if (io_do_buffer_select(req)) {
		size_t len = sr->len;
		buf = io_buffer_select(req, &len, issue_flags);
		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
				io_kbuf_recycle(req, issue_flags);
		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
	flags = sr->msg_flags;
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
	if (ret == -EAGAIN && force_nonblock) {
		ret = io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
				IO_APOLL_MULTI_POLLED) {
			io_kbuf_recycle(req, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
		return io_setup_async_msg(req, kmsg, issue_flags);
	if (ret == -ERESTARTSYS)
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
	else if (sr->done_io)
	io_kbuf_recycle(req, issue_flags);
	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;
	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;
	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
	sock = sock_from_file(req->file);
	if (io_do_buffer_select(req)) {
		buf = io_buffer_select(req, &len, issue_flags);
	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	flags = sr->msg_flags;
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	ret = sock_recvmsg(sock, &msg, flags);
	if (ret == -EAGAIN && force_nonblock) {
		if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
			io_kbuf_recycle(req, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
	if (ret > 0 && io_net_retry(sock, flags)) {
		req->flags |= REQ_F_PARTIAL_IO;
	if (ret == -ERESTARTSYS)
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
	else if (sr->done_io)
	io_kbuf_recycle(req, issue_flags);
	cflags = io_put_kbuf(req, issue_flags);
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;
	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF))
	notif = zc->notif = io_alloc_notif(ctx);
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);
		if (unlikely(idx >= ctx->nr_user_bufs))
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
		if (unlikely(sqe->addr2 || sqe->file_index))
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
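/*
 * sg_from_iter callbacks for zerocopy sends: io_sg_from_iter() maps the
 * caller's bvec pages straight into skb frags (no copy) and charges the
 * resulting truesize to the socket, while io_sg_from_iter_iovec() falls
 * back to the generic __zerocopy_sg_from_iter() path.
 */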
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	struct bvec_iter bi;
	unsigned long truesize = 0;
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;
	skb->data_len += copied;
	skb->truesize += truesize;
	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
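/*
 * Issue path for IORING_OP_SEND_ZC. The request completes with
 * IORING_CQE_F_MORE and a separate notification CQE (IORING_CQE_F_NOTIF)
 * fires once the kernel is done with the user pages. A minimal, hypothetical
 * userspace setup, going only by the SQE fields io_send_zc_prep() reads:
 *
 *	sqe->opcode	= IORING_OP_SEND_ZC;
 *	sqe->fd		= sockfd;
 *	sqe->addr	= (unsigned long) buf;
 *	sqe->len	= buf_len;
 *	sqe->msg_flags	= 0;
 *	sqe->ioprio	= 0;	(or IORING_RECVSEND_FIXED_BUF plus buf_index)
 */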
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct socket *sock;
	int ret, min_ret = 0;
	sock = sock_from_file(req->file);
	if (unlikely(!sock))
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (req_has_async_data(req)) {
		struct io_async_msghdr *io = req->async_data;
		msg.msg_name = &io->addr;
		ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
		if (unlikely(ret < 0))
		msg.msg_name = (struct sockaddr *)&__address;
		msg.msg_namelen = zc->addr_len;
	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		msg.sg_from_iter = io_sg_from_iter;
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
		ret = io_notif_account_mem(zc->notif, zc->len);
		msg.sg_from_iter = io_sg_from_iter_iovec;
	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);
	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);
		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		if (ret == -ERESTARTSYS)
	else if (zc->done_io)
	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	int ret, min_ret = 0;
	sock = sock_from_file(req->file);
	if (unlikely(!sock))
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		ret = io_sendmsg_copy_hdr(req, &iomsg);
	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);
	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	io_netmsg_recycle(req, issue_flags);
	else if (sr->done_io)
	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	if (sqe->len || sqe->buf_index)
	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
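/*
 * Multishot accept: with IORING_ACCEPT_MULTISHOT set in sqe->ioprio the
 * request stays armed and posts one CQE (flagged IORING_CQE_F_MORE) per
 * accepted connection until an error terminates it.
 */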
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	fd = __get_unused_fd_flags(accept->flags, accept->nofile);
	if (unlikely(fd < 0))
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
	ret = PTR_ERR(file);
	if (ret == -EAGAIN && force_nonblock) {
		/*
		 * if it's multishot and polled, we don't need to
		 * return EAGAIN to arm the poll infra since it
		 * has already been done
		 */
		if ((req->flags & IO_APOLL_MULTI_POLLED) ==
		    IO_APOLL_MULTI_POLLED)
			ret = IOU_ISSUE_SKIP_COMPLETE;
		if (ret == -ERESTARTSYS)
	} else if (!fixed) {
		fd_install(fd, file);
		ret = io_fixed_fd_install(req, issue_flags, file,
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);
	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	fd = __get_unused_fd_flags(sock->flags, sock->nofile);
	if (unlikely(fd < 0))
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	ret = PTR_ERR(file);
	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
	if (ret == -ERESTARTSYS)
	} else if (!fixed) {
		fd_install(fd, file);
		ret = io_fixed_fd_install(req, issue_flags, file,
	io_req_set_res(req, ret, 0);
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}
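/*
 * Nonblocking connect: -EINPROGRESS is remembered in connect->in_progress,
 * and when the request is issued again the final status is read via
 * sock_error() instead of calling connect() a second time.
 */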
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	if (connect->in_progress) {
		struct socket *socket;
		socket = sock_from_file(req->file);
		ret = sock_error(socket->sk);
	if (req_has_async_data(req)) {
		io = req->async_data;
		ret = move_addr_to_kernel(connect->addr,
	file_flags = force_nonblock ? O_NONBLOCK : 0;
	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			if (req_has_async_data(req))
			if (io_alloc_async_data(req)) {
			memcpy(req->async_data, &__io, sizeof(__io));
	if (ret == -ERESTARTSYS)
	io_req_set_res(req, ret, 0);
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}