// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

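/*
 * Recycling path for async msghdr state: a completed request tries to park
 * its io_async_msghdr in the per-ring netmsg_cache (see io_msg_alloc_async()
 * below) instead of freeing it, so the next sendmsg/recvmsg can reuse the
 * allocation.
 */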
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		hdr = container_of(entry, struct io_async_msghdr, cache);
		hdr->free_iov = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

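/*
 * Called when an issue attempt has to be retried later (typically on
 * -EAGAIN): copy the on-stack msghdr state into async_data so it survives
 * until the request is re-issued, fixing up msg_name and the fast_iov
 * pointer to reference the new copy.
 */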
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

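/*
 * IORING_OP_SENDMSG issue path: the msghdr either comes from async_data (a
 * retry) or is copied in from userspace now. A short send on a
 * stream/seqpacket socket is stashed via io_setup_async_msg() so the retry
 * continues where it left off.
 */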
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

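/*
 * IORING_OP_SEND issue path: builds a msghdr on the stack around the single
 * user buffer, optionally with a destination address from sqe->addr2, which
 * is copied into __address here (or reused from async_data on a retry).
 */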
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

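/*
 * Copy in the user msghdr for recvmsg. With IOSQE_BUFFER_SELECT only a
 * single iovec (or none) is allowed, since the actual buffer is picked at
 * issue time; otherwise the full iovec array is imported into
 * free_iov/fast_iov.
 */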
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

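/*
 * Multishot recvmsg lays out a struct io_uring_recvmsg_out header, then the
 * (optional) name and control data, then the payload inside the selected
 * buffer. This carves the buffer up accordingly and stashes the original
 * pointer in sr->buf for the final copy_to_user().
 */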
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

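/*
 * IORING_OP_RECVMSG issue path. Handles both the regular single-shot case
 * and multishot receive, where each completed receive posts a CQE with
 * IORING_CQE_F_MORE and the request is re-armed until an error or buffer
 * exhaustion ends it.
 */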
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

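/*
 * IORING_OP_RECV issue path: like io_recvmsg() but for a plain buffer, with
 * no user msghdr to copy. Shares the multishot completion logic via
 * io_recv_finish().
 */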
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

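/*
 * ->sg_from_iter callbacks used for zerocopy sends. The iovec variant falls
 * back to the generic copy/pin helper; io_sg_from_iter() below maps the
 * registered-buffer bvecs straight into skb frags without taking page
 * references.
 */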
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

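/*
 * IORING_OP_SEND_ZC issue path: the payload pages are referenced by the skb
 * rather than copied, and the notification request (zc->notif) posts a
 * second CQE with IORING_CQE_F_NOTIF once the network stack is done with
 * the buffer.
 */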
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

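/*
 * IORING_OP_SENDMSG_ZC issue path: same zerocopy/notification handling as
 * io_send_zc(), but the iovec comes from a user msghdr.
 */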
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

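/*
 * Accept supports a multishot mode (IORING_ACCEPT_MULTISHOT in sqe->ioprio):
 * one SQE keeps posting a CQE per accepted connection until it fails or is
 * cancelled.
 */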
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

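/*
 * IORING_OP_SOCKET: create a socket and install it either as a normal fd or
 * directly into the fixed file table when a file_slot is given.
 */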
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len =  READ_ONCE(sqe->addr2);
	return 0;
}

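/*
 * IORING_OP_CONNECT issue path: a nonblocking connect that returns
 * -EAGAIN/-EINPROGRESS stashes the kernel copy of the address in async_data
 * so the retry does not touch user memory again.
 */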
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif