1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
11 #include <uapi/linux/io_uring.h>
15 #include "alloc_cache.h"
20 #if defined(CONFIG_NET)
28 struct sockaddr __user *addr;
47 struct sockaddr __user *addr;
55 struct compat_msghdr __user *umsg_compat;
56 struct user_msghdr __user *umsg;
63 /* initialised and used only by the non-msghdr (!msg) send variants */
67 /* used only for send zerocopy */
68 struct io_kiocb *notif;
71 static inline bool io_check_multishot(struct io_kiocb *req,
72 unsigned int issue_flags)
75 * When ->locked_cq is set we only allow posting CQEs from the original
76 * task context. Usual request completions will be handled in other
77 * generic paths but multipoll may decide to post extra cqes.
79 return !(issue_flags & IO_URING_F_IOWQ) ||
80 !(issue_flags & IO_URING_F_MULTISHOT) ||
81 !req->ctx->task_complete;
84 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
86 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
88 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
89 sqe->buf_index || sqe->splice_fd_in))
92 shutdown->how = READ_ONCE(sqe->len);
93 req->flags |= REQ_F_FORCE_ASYNC;
97 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
99 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
103 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
105 sock = sock_from_file(req->file);
109 ret = __sys_shutdown_sock(sock, shutdown->how);
110 io_req_set_res(req, ret, 0);
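/*
 * Illustrative userspace counterpart (not part of this file): a minimal
 * sketch of queueing IORING_OP_SHUTDOWN via liburing, assuming an already
 * initialised ring and a connected socket `sockfd`; error handling omitted.
 *
 *	#include <liburing.h>
 *	#include <sys/socket.h>
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 *	// the CQE res is 0 on success or a negative errno, mirroring the
 *	// __sys_shutdown_sock() return handled above
 */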
114 static bool io_net_retry(struct socket *sock, int flags)
116 if (!(flags & MSG_WAITALL))
118 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
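/*
 * Added note: io_netmsg_recycle() below returns the request's async msghdr
 * to the per-ring netmsg_cache instead of freeing it, but only when the
 * issue is not IO_URING_F_UNLOCKED; io_msg_alloc_async() is the matching
 * consumer that pulls entries back out of that cache.
 */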
121 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
123 struct io_async_msghdr *hdr = req->async_data;
125 if (!req_has_async_data(req) || (issue_flags & IO_URING_F_UNLOCKED))
128 /* Let normal cleanup path reap it if we fail adding to the cache */
129 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
130 req->async_data = NULL;
131 req->flags &= ~REQ_F_ASYNC_DATA;
135 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
136 unsigned int issue_flags)
138 struct io_ring_ctx *ctx = req->ctx;
139 struct io_cache_entry *entry;
140 struct io_async_msghdr *hdr;
142 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
143 entry = io_alloc_cache_get(&ctx->netmsg_cache);
145 hdr = container_of(entry, struct io_async_msghdr, cache);
146 hdr->free_iov = NULL;
147 req->flags |= REQ_F_ASYNC_DATA;
148 req->async_data = hdr;
153 if (!io_alloc_async_data(req)) {
154 hdr = req->async_data;
155 hdr->free_iov = NULL;
161 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
163 /* ->prep_async is always called from the submission context */
164 return io_msg_alloc_async(req, 0);
167 static int io_setup_async_msg(struct io_kiocb *req,
168 struct io_async_msghdr *kmsg,
169 unsigned int issue_flags)
171 struct io_async_msghdr *async_msg;
173 if (req_has_async_data(req))
175 async_msg = io_msg_alloc_async(req, issue_flags);
177 kfree(kmsg->free_iov);
180 req->flags |= REQ_F_NEED_CLEANUP;
181 memcpy(async_msg, kmsg, sizeof(*kmsg));
182 if (async_msg->msg.msg_name)
183 async_msg->msg.msg_name = &async_msg->addr;
184 /* if we're using fast_iov, set it to the new one */
185 if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
186 size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
187 async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
193 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
194 struct io_async_msghdr *iomsg)
196 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
198 iomsg->msg.msg_name = &iomsg->addr;
199 iomsg->free_iov = iomsg->fast_iov;
200 return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
204 int io_send_prep_async(struct io_kiocb *req)
206 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
207 struct io_async_msghdr *io;
210 if (!zc->addr || req_has_async_data(req))
212 io = io_msg_alloc_async_prep(req);
215 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
219 static int io_setup_async_addr(struct io_kiocb *req,
220 struct sockaddr_storage *addr_storage,
221 unsigned int issue_flags)
223 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
224 struct io_async_msghdr *io;
226 if (!sr->addr || req_has_async_data(req))
228 io = io_msg_alloc_async(req, issue_flags);
231 memcpy(&io->addr, addr_storage, sizeof(io->addr));
235 int io_sendmsg_prep_async(struct io_kiocb *req)
239 if (!io_msg_alloc_async_prep(req))
241 ret = io_sendmsg_copy_hdr(req, req->async_data);
243 req->flags |= REQ_F_NEED_CLEANUP;
247 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
249 struct io_async_msghdr *io = req->async_data;
254 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
256 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
258 if (req->opcode == IORING_OP_SEND) {
259 if (READ_ONCE(sqe->__pad3[0]))
261 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
262 sr->addr_len = READ_ONCE(sqe->addr_len);
263 } else if (sqe->addr2 || sqe->file_index) {
267 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
268 sr->len = READ_ONCE(sqe->len);
269 sr->flags = READ_ONCE(sqe->ioprio);
270 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
272 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
273 if (sr->msg_flags & MSG_DONTWAIT)
274 req->flags |= REQ_F_NOWAIT;
277 if (req->ctx->compat)
278 sr->msg_flags |= MSG_CMSG_COMPAT;
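/*
 * Illustrative userspace counterpart (not part of this file): a sketch of
 * issuing IORING_OP_SENDMSG with liburing, assuming an initialised ring and
 * a populated struct msghdr `mh`; names are placeholders, errors ignored.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_sendmsg(sqe, sockfd, &mh, 0);
 *	// optionally delay issue until the socket is writable, matching the
 *	// sr->flags check above:
 *	// sqe->ioprio |= IORING_RECVSEND_POLL_FIRST;
 *	io_uring_submit(&ring);
 */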
284 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
286 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
287 struct io_async_msghdr iomsg, *kmsg;
293 sock = sock_from_file(req->file);
297 if (req_has_async_data(req)) {
298 kmsg = req->async_data;
300 ret = io_sendmsg_copy_hdr(req, &iomsg);
306 if (!(req->flags & REQ_F_POLLED) &&
307 (sr->flags & IORING_RECVSEND_POLL_FIRST))
308 return io_setup_async_msg(req, kmsg, issue_flags);
310 flags = sr->msg_flags;
311 if (issue_flags & IO_URING_F_NONBLOCK)
312 flags |= MSG_DONTWAIT;
313 if (flags & MSG_WAITALL)
314 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
316 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
319 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
320 return io_setup_async_msg(req, kmsg, issue_flags);
321 if (ret > 0 && io_net_retry(sock, flags)) {
323 req->flags |= REQ_F_PARTIAL_IO;
324 return io_setup_async_msg(req, kmsg, issue_flags);
326 if (ret == -ERESTARTSYS)
330 /* fast path, check for non-NULL to avoid function call */
332 kfree(kmsg->free_iov);
333 req->flags &= ~REQ_F_NEED_CLEANUP;
334 io_netmsg_recycle(req, issue_flags);
337 else if (sr->done_io)
339 io_req_set_res(req, ret, 0);
343 int io_send(struct io_kiocb *req, unsigned int issue_flags)
345 struct sockaddr_storage __address;
346 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
354 msg.msg_control = NULL;
355 msg.msg_controllen = 0;
360 if (req_has_async_data(req)) {
361 struct io_async_msghdr *io = req->async_data;
363 msg.msg_name = &io->addr;
365 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
366 if (unlikely(ret < 0))
368 msg.msg_name = (struct sockaddr *)&__address;
370 msg.msg_namelen = sr->addr_len;
373 if (!(req->flags & REQ_F_POLLED) &&
374 (sr->flags & IORING_RECVSEND_POLL_FIRST))
375 return io_setup_async_addr(req, &__address, issue_flags);
377 sock = sock_from_file(req->file);
381 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
385 flags = sr->msg_flags;
386 if (issue_flags & IO_URING_F_NONBLOCK)
387 flags |= MSG_DONTWAIT;
388 if (flags & MSG_WAITALL)
389 min_ret = iov_iter_count(&msg.msg_iter);
391 msg.msg_flags = flags;
392 ret = sock_sendmsg(sock, &msg);
394 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
395 return io_setup_async_addr(req, &__address, issue_flags);
397 if (ret > 0 && io_net_retry(sock, flags)) {
401 req->flags |= REQ_F_PARTIAL_IO;
402 return io_setup_async_addr(req, &__address, issue_flags);
404 if (ret == -ERESTARTSYS)
410 else if (sr->done_io)
412 io_req_set_res(req, ret, 0);
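/*
 * Illustrative userspace counterpart (not part of this file): a sketch of a
 * plain IORING_OP_SEND via liburing on a connected socket; `buf` and `len`
 * are placeholders and error handling is omitted.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send(sqe, sockfd, buf, len, MSG_WAITALL);
 *	io_uring_submit(&ring);
 *	// with MSG_WAITALL a short transfer is retried via io_net_retry()
 *	// above, and sr->done_io accumulates the bytes already sent
 */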
416 static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
420 if (iomsg->namelen < 0)
422 if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
423 iomsg->namelen, &hdr))
425 if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
431 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
432 struct io_async_msghdr *iomsg)
434 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
435 struct user_msghdr msg;
438 if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
441 ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
445 if (req->flags & REQ_F_BUFFER_SELECT) {
446 if (msg.msg_iovlen == 0) {
447 sr->len = iomsg->fast_iov[0].iov_len = 0;
448 iomsg->fast_iov[0].iov_base = NULL;
449 iomsg->free_iov = NULL;
450 } else if (msg.msg_iovlen > 1) {
453 if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
455 sr->len = iomsg->fast_iov[0].iov_len;
456 iomsg->free_iov = NULL;
459 if (req->flags & REQ_F_APOLL_MULTISHOT) {
460 iomsg->namelen = msg.msg_namelen;
461 iomsg->controllen = msg.msg_controllen;
462 if (io_recvmsg_multishot_overflow(iomsg))
466 iomsg->free_iov = iomsg->fast_iov;
467 ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
468 &iomsg->free_iov, &iomsg->msg.msg_iter,
478 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
479 struct io_async_msghdr *iomsg)
481 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
482 struct compat_msghdr msg;
483 struct compat_iovec __user *uiov;
486 if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
489 ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
493 uiov = compat_ptr(msg.msg_iov);
494 if (req->flags & REQ_F_BUFFER_SELECT) {
497 iomsg->free_iov = NULL;
498 if (msg.msg_iovlen == 0) {
500 } else if (msg.msg_iovlen > 1) {
503 if (!access_ok(uiov, sizeof(*uiov)))
505 if (__get_user(clen, &uiov->iov_len))
512 if (req->flags & REQ_F_APOLL_MULTISHOT) {
513 iomsg->namelen = msg.msg_namelen;
514 iomsg->controllen = msg.msg_controllen;
515 if (io_recvmsg_multishot_overflow(iomsg))
519 iomsg->free_iov = iomsg->fast_iov;
520 ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
521 UIO_FASTIOV, &iomsg->free_iov,
522 &iomsg->msg.msg_iter, true);
531 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
532 struct io_async_msghdr *iomsg)
534 iomsg->msg.msg_name = &iomsg->addr;
537 if (req->ctx->compat)
538 return __io_compat_recvmsg_copy_hdr(req, iomsg);
541 return __io_recvmsg_copy_hdr(req, iomsg);
544 int io_recvmsg_prep_async(struct io_kiocb *req)
548 if (!io_msg_alloc_async_prep(req))
550 ret = io_recvmsg_copy_hdr(req, req->async_data);
552 req->flags |= REQ_F_NEED_CLEANUP;
556 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
558 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
560 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
562 if (unlikely(sqe->file_index || sqe->addr2))
565 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
566 sr->len = READ_ONCE(sqe->len);
567 sr->flags = READ_ONCE(sqe->ioprio);
568 if (sr->flags & ~(RECVMSG_FLAGS))
570 sr->msg_flags = READ_ONCE(sqe->msg_flags);
571 if (sr->msg_flags & MSG_DONTWAIT)
572 req->flags |= REQ_F_NOWAIT;
573 if (sr->msg_flags & MSG_ERRQUEUE)
574 req->flags |= REQ_F_CLEAR_POLLIN;
575 if (sr->flags & IORING_RECV_MULTISHOT) {
576 if (!(req->flags & REQ_F_BUFFER_SELECT))
578 if (sr->msg_flags & MSG_WAITALL)
580 if (req->opcode == IORING_OP_RECV && sr->len)
582 req->flags |= REQ_F_APOLL_MULTISHOT;
584 * Store the buffer group for this multishot receive separately,
585 * as if we end up doing an io-wq based issue that selects a
586 * buffer, it has to be committed immediately and that will
587 * clear ->buf_list. This means we lose the link to the buffer
588 * list, and the eventual buffer put on completion then cannot
589 * restore it.
590 */
591 sr->buf_group = req->buf_index;
595 if (req->ctx->compat)
596 sr->msg_flags |= MSG_CMSG_COMPAT;
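/*
 * Illustrative userspace counterpart (not part of this file): a sketch of a
 * single-shot IORING_OP_RECVMSG with liburing; `mh` must stay valid until
 * the CQE arrives. For IORING_RECV_MULTISHOT the request must also use
 * IOSQE_BUFFER_SELECT with a buffer group, as enforced in the prep above.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recvmsg(sqe, sockfd, &mh, 0);
 *	io_uring_submit(&ring);
 */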
602 static inline void io_recv_prep_retry(struct io_kiocb *req)
604 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
607 sr->len = 0; /* get from the provided buffer */
608 req->buf_index = sr->buf_group;
612 * Finishes io_recv and io_recvmsg.
614 * Returns true if it is actually finished, or false if it should run
615 * again (for multishot).
617 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
618 unsigned int cflags, bool mshot_finished,
619 unsigned issue_flags)
621 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
622 io_req_set_res(req, *ret, cflags);
627 if (!mshot_finished) {
628 if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
629 req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
630 io_recv_prep_retry(req);
633 /* Otherwise stop multishot but use the current result. */
636 io_req_set_res(req, *ret, cflags);
638 if (issue_flags & IO_URING_F_MULTISHOT)
639 *ret = IOU_STOP_MULTISHOT;
645 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
646 struct io_sr_msg *sr, void __user **buf,
649 unsigned long ubuf = (unsigned long) *buf;
652 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
657 if (kmsg->controllen) {
658 unsigned long control = ubuf + hdr - kmsg->controllen;
660 kmsg->msg.msg_control_user = (void __user *) control;
661 kmsg->msg.msg_controllen = kmsg->controllen;
664 sr->buf = *buf; /* stash for later copy */
665 *buf = (void __user *) (ubuf + hdr);
666 kmsg->payloadlen = *len = *len - hdr;
670 struct io_recvmsg_multishot_hdr {
671 struct io_uring_recvmsg_out msg;
672 struct sockaddr_storage addr;
675 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
676 struct io_async_msghdr *kmsg,
677 unsigned int flags, bool *finished)
681 struct io_recvmsg_multishot_hdr hdr;
684 kmsg->msg.msg_name = &hdr.addr;
685 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
686 kmsg->msg.msg_namelen = 0;
688 if (sock->file->f_flags & O_NONBLOCK)
689 flags |= MSG_DONTWAIT;
691 err = sock_recvmsg(sock, &kmsg->msg, flags);
692 *finished = err <= 0;
696 hdr.msg = (struct io_uring_recvmsg_out) {
697 .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
698 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
701 hdr.msg.payloadlen = err;
702 if (err > kmsg->payloadlen)
703 err = kmsg->payloadlen;
705 copy_len = sizeof(struct io_uring_recvmsg_out);
706 if (kmsg->msg.msg_namelen > kmsg->namelen)
707 copy_len += kmsg->namelen;
709 copy_len += kmsg->msg.msg_namelen;
712 * "fromlen shall refer to the value before truncation.."
715 hdr.msg.namelen = kmsg->msg.msg_namelen;
717 /* ensure that there is no gap between hdr and sockaddr_storage */
718 BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
719 sizeof(struct io_uring_recvmsg_out));
720 if (copy_to_user(io->buf, &hdr, copy_len)) {
725 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
726 kmsg->controllen + err;
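/*
 * Added note: for multishot recvmsg each selected provided buffer is filled
 * as [struct io_uring_recvmsg_out][name][control][payload], where the name
 * and control regions are sized by the msg_namelen/msg_controllen that were
 * submitted, exactly as laid out by io_recvmsg_prep_multishot() above. A
 * hedged userspace sketch of walking that layout by hand (liburing also
 * ships io_uring_recvmsg_*() helpers for this); `buf`, `submitted_namelen`
 * and `submitted_controllen` are placeholders:
 *
 *	struct io_uring_recvmsg_out *out = buf;
 *	void *name    = out + 1;
 *	void *control = (char *)name + submitted_namelen;
 *	void *payload = (char *)control + submitted_controllen;
 *	// out->namelen, out->controllen and out->payloadlen report the
 *	// pre-truncation sizes; clamp against the submitted sizes before use
 */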
729 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
731 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
732 struct io_async_msghdr iomsg, *kmsg;
736 int ret, min_ret = 0;
737 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
738 bool mshot_finished = true;
740 sock = sock_from_file(req->file);
744 if (req_has_async_data(req)) {
745 kmsg = req->async_data;
747 ret = io_recvmsg_copy_hdr(req, &iomsg);
753 if (!(req->flags & REQ_F_POLLED) &&
754 (sr->flags & IORING_RECVSEND_POLL_FIRST))
755 return io_setup_async_msg(req, kmsg, issue_flags);
757 if (!io_check_multishot(req, issue_flags))
758 return io_setup_async_msg(req, kmsg, issue_flags);
761 if (io_do_buffer_select(req)) {
763 size_t len = sr->len;
765 buf = io_buffer_select(req, &len, issue_flags);
769 if (req->flags & REQ_F_APOLL_MULTISHOT) {
770 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
772 io_kbuf_recycle(req, issue_flags);
777 iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
780 flags = sr->msg_flags;
782 flags |= MSG_DONTWAIT;
783 if (flags & MSG_WAITALL)
784 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
786 kmsg->msg.msg_get_inq = 1;
787 if (req->flags & REQ_F_APOLL_MULTISHOT)
788 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
791 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
795 if (ret == -EAGAIN && force_nonblock) {
796 ret = io_setup_async_msg(req, kmsg, issue_flags);
797 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
798 io_kbuf_recycle(req, issue_flags);
799 return IOU_ISSUE_SKIP_COMPLETE;
803 if (ret > 0 && io_net_retry(sock, flags)) {
805 req->flags |= REQ_F_PARTIAL_IO;
806 return io_setup_async_msg(req, kmsg, issue_flags);
808 if (ret == -ERESTARTSYS)
811 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
817 else if (sr->done_io)
820 io_kbuf_recycle(req, issue_flags);
822 cflags = io_put_kbuf(req, issue_flags);
823 if (kmsg->msg.msg_inq)
824 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
826 if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
827 goto retry_multishot;
829 if (mshot_finished) {
830 /* fast path, check for non-NULL to avoid function call */
832 kfree(kmsg->free_iov);
833 io_netmsg_recycle(req, issue_flags);
834 req->flags &= ~REQ_F_NEED_CLEANUP;
840 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
842 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
847 int ret, min_ret = 0;
848 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
849 size_t len = sr->len;
851 if (!(req->flags & REQ_F_POLLED) &&
852 (sr->flags & IORING_RECVSEND_POLL_FIRST))
855 if (!io_check_multishot(req, issue_flags))
858 sock = sock_from_file(req->file);
863 if (io_do_buffer_select(req)) {
866 buf = io_buffer_select(req, &len, issue_flags);
872 ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
878 msg.msg_control = NULL;
881 msg.msg_controllen = 0;
885 flags = sr->msg_flags;
887 flags |= MSG_DONTWAIT;
888 if (flags & MSG_WAITALL)
889 min_ret = iov_iter_count(&msg.msg_iter);
891 ret = sock_recvmsg(sock, &msg, flags);
893 if (ret == -EAGAIN && force_nonblock) {
894 if (issue_flags & IO_URING_F_MULTISHOT) {
895 io_kbuf_recycle(req, issue_flags);
896 return IOU_ISSUE_SKIP_COMPLETE;
901 if (ret > 0 && io_net_retry(sock, flags)) {
905 req->flags |= REQ_F_PARTIAL_IO;
908 if (ret == -ERESTARTSYS)
911 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
918 else if (sr->done_io)
921 io_kbuf_recycle(req, issue_flags);
923 cflags = io_put_kbuf(req, issue_flags);
925 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
927 if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
928 goto retry_multishot;
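/*
 * Illustrative userspace counterpart (not part of this file): a sketch of a
 * multishot IORING_OP_RECV with liburing, assuming a provided-buffer group
 * `bgid` has already been registered; each CQE then carries a buffer id and
 * IORING_CQE_F_MORE while the request stays armed.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *	io_uring_submit(&ring);
 */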
933 void io_send_zc_cleanup(struct io_kiocb *req)
935 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
936 struct io_async_msghdr *io;
938 if (req_has_async_data(req)) {
939 io = req->async_data;
940 /* might be ->fast_iov if *msg_copy_hdr failed */
941 if (io->free_iov != io->fast_iov)
945 io_notif_flush(zc->notif);
950 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
951 #define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
953 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
955 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
956 struct io_ring_ctx *ctx = req->ctx;
957 struct io_kiocb *notif;
959 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
961 /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
962 if (req->flags & REQ_F_CQE_SKIP)
965 notif = zc->notif = io_alloc_notif(ctx);
968 notif->cqe.user_data = req->cqe.user_data;
970 notif->cqe.flags = IORING_CQE_F_NOTIF;
971 req->flags |= REQ_F_NEED_CLEANUP;
973 zc->flags = READ_ONCE(sqe->ioprio);
974 if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
975 if (zc->flags & ~IO_ZC_FLAGS_VALID)
977 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
978 io_notif_set_extended(notif);
979 io_notif_to_data(notif)->zc_report = true;
983 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
984 unsigned idx = READ_ONCE(sqe->buf_index);
986 if (unlikely(idx >= ctx->nr_user_bufs))
988 idx = array_index_nospec(idx, ctx->nr_user_bufs);
989 req->imu = READ_ONCE(ctx->user_bufs[idx]);
990 io_req_set_rsrc_node(notif, ctx, 0);
993 if (req->opcode == IORING_OP_SEND_ZC) {
994 if (READ_ONCE(sqe->__pad3[0]))
996 zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
997 zc->addr_len = READ_ONCE(sqe->addr_len);
999 if (unlikely(sqe->addr2 || sqe->file_index))
1001 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1005 zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1006 zc->len = READ_ONCE(sqe->len);
1007 zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1008 if (zc->msg_flags & MSG_DONTWAIT)
1009 req->flags |= REQ_F_NOWAIT;
1013 #ifdef CONFIG_COMPAT
1014 if (req->ctx->compat)
1015 zc->msg_flags |= MSG_CMSG_COMPAT;
1020 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1021 struct iov_iter *from, size_t length)
1023 skb_zcopy_downgrade_managed(skb);
1024 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
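/*
 * Added note: io_sg_from_iter() below is the zerocopy path for bvec-backed
 * (registered) buffers: it maps the iterator's pages straight into the skb
 * frag array and only accounts truesize, instead of copying, and falls back
 * to __zerocopy_sg_from_iter() when the skb cannot carry managed frags.
 */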
1027 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1028 struct iov_iter *from, size_t length)
1030 struct skb_shared_info *shinfo = skb_shinfo(skb);
1031 int frag = shinfo->nr_frags;
1033 struct bvec_iter bi;
1035 unsigned long truesize = 0;
1038 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1039 else if (unlikely(!skb_zcopy_managed(skb)))
1040 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1042 bi.bi_size = min(from->count, length);
1043 bi.bi_bvec_done = from->iov_offset;
1046 while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1047 struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1050 truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1051 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1052 v.bv_offset, v.bv_len);
1053 bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1058 shinfo->nr_frags = frag;
1059 from->bvec += bi.bi_idx;
1060 from->nr_segs -= bi.bi_idx;
1061 from->count -= copied;
1062 from->iov_offset = bi.bi_bvec_done;
1064 skb->data_len += copied;
1066 skb->truesize += truesize;
1068 if (sk && sk->sk_type == SOCK_STREAM) {
1069 sk_wmem_queued_add(sk, truesize);
1070 if (!skb_zcopy_pure(skb))
1071 sk_mem_charge(sk, truesize);
1073 refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1078 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1080 struct sockaddr_storage __address;
1081 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1083 struct socket *sock;
1085 int ret, min_ret = 0;
1087 sock = sock_from_file(req->file);
1088 if (unlikely(!sock))
1090 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1093 msg.msg_name = NULL;
1094 msg.msg_control = NULL;
1095 msg.msg_controllen = 0;
1096 msg.msg_namelen = 0;
1099 if (req_has_async_data(req)) {
1100 struct io_async_msghdr *io = req->async_data;
1102 msg.msg_name = &io->addr;
1104 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1105 if (unlikely(ret < 0))
1107 msg.msg_name = (struct sockaddr *)&__address;
1109 msg.msg_namelen = zc->addr_len;
1112 if (!(req->flags & REQ_F_POLLED) &&
1113 (zc->flags & IORING_RECVSEND_POLL_FIRST))
1114 return io_setup_async_addr(req, &__address, issue_flags);
1116 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1117 ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1118 (u64)(uintptr_t)zc->buf, zc->len);
1121 msg.sg_from_iter = io_sg_from_iter;
1123 io_notif_set_extended(zc->notif);
1124 ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1127 ret = io_notif_account_mem(zc->notif, zc->len);
1130 msg.sg_from_iter = io_sg_from_iter_iovec;
1133 msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1134 if (issue_flags & IO_URING_F_NONBLOCK)
1135 msg_flags |= MSG_DONTWAIT;
1136 if (msg_flags & MSG_WAITALL)
1137 min_ret = iov_iter_count(&msg.msg_iter);
1139 msg.msg_flags = msg_flags;
1140 msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1141 ret = sock_sendmsg(sock, &msg);
1143 if (unlikely(ret < min_ret)) {
1144 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1145 return io_setup_async_addr(req, &__address, issue_flags);
1147 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1151 req->flags |= REQ_F_PARTIAL_IO;
1152 return io_setup_async_addr(req, &__address, issue_flags);
1154 if (ret == -ERESTARTSYS)
1161 else if (zc->done_io)
1165 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1166 * flushing notif to io_send_zc_cleanup()
1168 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1169 io_notif_flush(zc->notif);
1170 req->flags &= ~REQ_F_NEED_CLEANUP;
1172 io_req_set_res(req, ret, IORING_CQE_F_MORE);
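/*
 * Illustrative userspace counterpart (not part of this file): a sketch of
 * IORING_OP_SEND_ZC with liburing. The request posts two CQEs: the first
 * carries the send result with IORING_CQE_F_MORE set, the second is the
 * IORING_CQE_F_NOTIF completion after which `buf` may safely be reused.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *	io_uring_submit(&ring);
 *	// wait for both CQEs before touching buf again
 */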
1176 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1178 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1179 struct io_async_msghdr iomsg, *kmsg;
1180 struct socket *sock;
1182 int ret, min_ret = 0;
1184 io_notif_set_extended(sr->notif);
1186 sock = sock_from_file(req->file);
1187 if (unlikely(!sock))
1189 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1192 if (req_has_async_data(req)) {
1193 kmsg = req->async_data;
1195 ret = io_sendmsg_copy_hdr(req, &iomsg);
1201 if (!(req->flags & REQ_F_POLLED) &&
1202 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1203 return io_setup_async_msg(req, kmsg, issue_flags);
1205 flags = sr->msg_flags | MSG_ZEROCOPY;
1206 if (issue_flags & IO_URING_F_NONBLOCK)
1207 flags |= MSG_DONTWAIT;
1208 if (flags & MSG_WAITALL)
1209 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1211 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1212 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1213 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1215 if (unlikely(ret < min_ret)) {
1216 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1217 return io_setup_async_msg(req, kmsg, issue_flags);
1219 if (ret > 0 && io_net_retry(sock, flags)) {
1221 req->flags |= REQ_F_PARTIAL_IO;
1222 return io_setup_async_msg(req, kmsg, issue_flags);
1224 if (ret == -ERESTARTSYS)
1228 /* fast path, check for non-NULL to avoid function call */
1229 if (kmsg->free_iov) {
1230 kfree(kmsg->free_iov);
1231 kmsg->free_iov = NULL;
1234 io_netmsg_recycle(req, issue_flags);
1237 else if (sr->done_io)
1241 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1242 * flushing notif to io_send_zc_cleanup()
1244 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1245 io_notif_flush(sr->notif);
1246 req->flags &= ~REQ_F_NEED_CLEANUP;
1248 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1252 void io_sendrecv_fail(struct io_kiocb *req)
1254 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1256 if (req->flags & REQ_F_PARTIAL_IO)
1257 req->cqe.res = sr->done_io;
1259 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1260 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1261 req->cqe.flags |= IORING_CQE_F_MORE;
1264 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1266 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1269 if (sqe->len || sqe->buf_index)
1272 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1273 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1274 accept->flags = READ_ONCE(sqe->accept_flags);
1275 accept->nofile = rlimit(RLIMIT_NOFILE);
1276 flags = READ_ONCE(sqe->ioprio);
1277 if (flags & ~IORING_ACCEPT_MULTISHOT)
1280 accept->file_slot = READ_ONCE(sqe->file_index);
1281 if (accept->file_slot) {
1282 if (accept->flags & SOCK_CLOEXEC)
1284 if (flags & IORING_ACCEPT_MULTISHOT &&
1285 accept->file_slot != IORING_FILE_INDEX_ALLOC)
1288 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1290 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1291 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1292 if (flags & IORING_ACCEPT_MULTISHOT)
1293 req->flags |= REQ_F_APOLL_MULTISHOT;
1297 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1299 struct io_ring_ctx *ctx = req->ctx;
1300 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1301 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1302 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1303 bool fixed = !!accept->file_slot;
1307 if (!io_check_multishot(req, issue_flags))
1311 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1312 if (unlikely(fd < 0))
1315 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1320 ret = PTR_ERR(file);
1321 if (ret == -EAGAIN && force_nonblock) {
1323 * if it's multishot and polled, we don't need to
1324 * return EAGAIN to arm the poll infra since it
1325 * has already been done
1327 if (issue_flags & IO_URING_F_MULTISHOT)
1328 ret = IOU_ISSUE_SKIP_COMPLETE;
1331 if (ret == -ERESTARTSYS)
1334 } else if (!fixed) {
1335 fd_install(fd, file);
1338 ret = io_fixed_fd_install(req, issue_flags, file,
1342 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1343 io_req_set_res(req, ret, 0);
1349 if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1350 req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
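/*
 * Illustrative userspace counterpart (not part of this file): a sketch of
 * multishot accept with liburing; a single SQE keeps producing one CQE
 * (flagged IORING_CQE_F_MORE) per accepted connection until it is cancelled
 * or an error terminates it.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
 *	io_uring_submit(&ring);
 *	// each CQE's res is the new file descriptor or a negative errno
 */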
1356 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1358 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1360 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1363 sock->domain = READ_ONCE(sqe->fd);
1364 sock->type = READ_ONCE(sqe->off);
1365 sock->protocol = READ_ONCE(sqe->len);
1366 sock->file_slot = READ_ONCE(sqe->file_index);
1367 sock->nofile = rlimit(RLIMIT_NOFILE);
1369 sock->flags = sock->type & ~SOCK_TYPE_MASK;
1370 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1372 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1377 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1379 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1380 bool fixed = !!sock->file_slot;
1385 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1386 if (unlikely(fd < 0))
1389 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1393 ret = PTR_ERR(file);
1394 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1396 if (ret == -ERESTARTSYS)
1399 } else if (!fixed) {
1400 fd_install(fd, file);
1403 ret = io_fixed_fd_install(req, issue_flags, file,
1406 io_req_set_res(req, ret, 0);
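/*
 * Illustrative userspace counterpart (not part of this file): a sketch of
 * IORING_OP_SOCKET via liburing; the CQE res is the new socket fd unless a
 * fixed file slot was requested in the SQE.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_socket(sqe, AF_INET, SOCK_STREAM, 0, 0);
 *	io_uring_submit(&ring);
 */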
1410 int io_connect_prep_async(struct io_kiocb *req)
1412 struct io_async_connect *io = req->async_data;
1413 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1415 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1418 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1420 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1422 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1425 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1426 conn->addr_len = READ_ONCE(sqe->addr2);
1427 conn->in_progress = false;
1431 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1433 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1434 struct io_async_connect __io, *io;
1435 unsigned file_flags;
1437 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1439 if (connect->in_progress) {
1440 struct socket *socket;
1443 socket = sock_from_file(req->file);
1445 ret = sock_error(socket->sk);
1449 if (req_has_async_data(req)) {
1450 io = req->async_data;
1452 ret = move_addr_to_kernel(connect->addr,
1460 file_flags = force_nonblock ? O_NONBLOCK : 0;
1462 ret = __sys_connect_file(req->file, &io->address,
1463 connect->addr_len, file_flags);
1464 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
1465 if (ret == -EINPROGRESS) {
1466 connect->in_progress = true;
1468 if (req_has_async_data(req))
1470 if (io_alloc_async_data(req)) {
1474 memcpy(req->async_data, &__io, sizeof(__io));
1478 if (ret == -ERESTARTSYS)
1483 io_req_set_res(req, ret, 0);
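/*
 * Illustrative userspace counterpart (not part of this file): a sketch of
 * IORING_OP_CONNECT via liburing; the sockaddr is copied into the kernel
 * when the request is prepped/issued (see io_connect_prep_async() above),
 * `sa`/`salen` are placeholders and error handling is omitted.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&sa, salen);
 *	io_uring_submit(&ring);
 */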
1487 void io_netmsg_cache_free(struct io_cache_entry *entry)
1489 kfree(container_of(entry, struct io_async_msghdr, cache));