// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"

#if defined(CONFIG_NET)

        struct sockaddr __user          *addr;
        struct sockaddr __user          *addr;
        struct compat_msghdr __user     *umsg_compat;
        struct user_msghdr __user       *umsg;

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

        if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
                     sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;

        shutdown->how = READ_ONCE(sqe->len);
        return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
        struct socket *sock;
        int ret;

        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        ret = __sys_shutdown_sock(sock, shutdown->how);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
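
/*
 * Partial transfers are only retried when MSG_WAITALL was requested and the
 * socket is connection-oriented (SOCK_STREAM or SOCK_SEQPACKET); retrying on
 * a datagram socket would not continue the same message.
 */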
static bool io_net_retry(struct socket *sock, int flags)
{
        if (!(flags & MSG_WAITALL))
                return false;
        return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
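
/*
 * When a request completes with the ring lock held (IO_URING_F_UNLOCKED not
 * set), the async msghdr is parked in ctx->netmsg_cache instead of being
 * freed, so the next sendmsg/recvmsg that needs async data can reuse it.
 */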
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_async_msghdr *hdr = req->async_data;

        if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
                return;

        /* Let normal cleanup path reap it if we fail adding to the cache */
        if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
                req->async_data = NULL;
                req->flags &= ~REQ_F_ASYNC_DATA;
        }
}

static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
                                                      unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;

        if (!(issue_flags & IO_URING_F_UNLOCKED) &&
            (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
                struct io_async_msghdr *hdr;

                hdr = container_of(entry, struct io_async_msghdr, cache);
                req->flags |= REQ_F_ASYNC_DATA;
                req->async_data = hdr;
                return hdr;
        }

        if (!io_alloc_async_data(req))
                return req->async_data;

        return NULL;
}

static int io_setup_async_msg(struct io_kiocb *req,
                              struct io_async_msghdr *kmsg,
                              unsigned int issue_flags)
{
        struct io_async_msghdr *async_msg = req->async_data;

        if (async_msg)
                return -EAGAIN;
        async_msg = io_recvmsg_alloc_async(req, issue_flags);
        if (!async_msg) {
                kfree(kmsg->free_iov);
                return -ENOMEM;
        }
        req->flags |= REQ_F_NEED_CLEANUP;
        memcpy(async_msg, kmsg, sizeof(*kmsg));
        async_msg->msg.msg_name = &async_msg->addr;
        /* if we're using fast_iov, set it to the new one */
        if (!async_msg->free_iov)
                async_msg->msg.msg_iter.iov = async_msg->fast_iov;

        return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);

        iomsg->msg.msg_name = &iomsg->addr;
        iomsg->free_iov = iomsg->fast_iov;
        return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
                                   &iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
        int ret;

        ret = io_sendmsg_copy_hdr(req, req->async_data);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
        return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
        struct io_async_msghdr *io = req->async_data;

        kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);

        if (unlikely(sqe->file_index || sqe->addr2))
                return -EINVAL;

        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
        sr->done_io = 0;
        return 0;
}
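
/*
 * Illustrative userspace setup for IORING_OP_SENDMSG, based purely on the
 * fields io_sendmsg_prep() reads above (variable names are examples, not
 * part of this file):
 *
 *      sqe->opcode    = IORING_OP_SENDMSG;
 *      sqe->fd        = sockfd;
 *      sqe->addr      = (unsigned long) &msg;  // userspace struct msghdr
 *      sqe->msg_flags = 0;                     // MSG_NOSIGNAL is ORed in here
 *      sqe->ioprio    = 0;                     // or IORING_RECVSEND_POLL_FIRST
 */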
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned flags;
        int min_ret = 0;
        int ret;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
        } else {
                ret = io_sendmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
                kmsg = &iomsg;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_msg(req, kmsg, issue_flags);
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
                req_set_fail(req);
        }
        /* fast path, check for non-NULL to avoid function call */
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        io_netmsg_recycle(req, issue_flags);
        if (ret >= 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);
        struct msghdr msg;
        struct iovec iov;
        struct socket *sock;
        unsigned flags;
        int min_ret = 0;
        int ret;

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
        if (unlikely(ret))
                return ret;

        msg.msg_name = NULL;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
        msg.msg_ubuf = NULL;

        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        msg.msg_flags = flags;
        ret = sock_sendmsg(sock, &msg);
        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->len -= ret;
                        sr->buf += ret;
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return -EAGAIN;
                }
                req_set_fail(req);
        }
        if (ret >= 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
        int hdr;

        if (iomsg->namelen < 0)
                return true;
        if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
                               iomsg->namelen, &hdr))
                return true;
        if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
                return true;

        return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
                                 struct io_async_msghdr *iomsg)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);
        struct user_msghdr msg;
        int ret;

        if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
                return -EFAULT;

        ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
        if (ret)
                return ret;

        if (req->flags & REQ_F_BUFFER_SELECT) {
                if (msg.msg_iovlen == 0) {
                        sr->len = iomsg->fast_iov[0].iov_len = 0;
                        iomsg->fast_iov[0].iov_base = NULL;
                        iomsg->free_iov = NULL;
                } else if (msg.msg_iovlen > 1) {
                        return -EINVAL;
                } else {
                        if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
                                return -EFAULT;
                        sr->len = iomsg->fast_iov[0].iov_len;
                        iomsg->free_iov = NULL;
                }

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        iomsg->namelen = msg.msg_namelen;
                        iomsg->controllen = msg.msg_controllen;
                        if (io_recvmsg_multishot_overflow(iomsg))
                                return -EOVERFLOW;
                }
        } else {
                iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
                                     &iomsg->free_iov, &iomsg->msg.msg_iter,
                                     false);
                if (ret > 0)
                        ret = 0;
        }

        return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
                                        struct io_async_msghdr *iomsg)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);
        struct compat_msghdr msg;
        struct compat_iovec __user *uiov;
        int ret;

        if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
                return -EFAULT;

        ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
        if (ret)
                return ret;

        uiov = compat_ptr(msg.msg_iov);
        if (req->flags & REQ_F_BUFFER_SELECT) {
                compat_ssize_t clen;

                if (msg.msg_iovlen == 0) {
                        sr->len = 0;
                        iomsg->free_iov = NULL;
                } else if (msg.msg_iovlen > 1) {
                        return -EINVAL;
                } else {
                        if (!access_ok(uiov, sizeof(*uiov)))
                                return -EFAULT;
                        if (__get_user(clen, &uiov->iov_len))
                                return -EFAULT;
                        if (clen < 0)
                                return -EINVAL;
                        sr->len = clen;
                        iomsg->free_iov = NULL;
                }

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        iomsg->namelen = msg.msg_namelen;
                        iomsg->controllen = msg.msg_controllen;
                        if (io_recvmsg_multishot_overflow(iomsg))
                                return -EOVERFLOW;
                }
        } else {
                iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
                                     UIO_FASTIOV, &iomsg->free_iov,
                                     &iomsg->msg.msg_iter, true);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
{
        iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

        return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
        int ret;

        ret = io_recvmsg_copy_hdr(req, req->async_data);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
        return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);

        if (unlikely(sqe->file_index || sqe->addr2))
                return -EINVAL;

        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~(RECVMSG_FLAGS))
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        if (sr->msg_flags & MSG_ERRQUEUE)
                req->flags |= REQ_F_CLEAR_POLLIN;
        if (sr->flags & IORING_RECV_MULTISHOT) {
                if (!(req->flags & REQ_F_BUFFER_SELECT))
                        return -EINVAL;
                if (sr->msg_flags & MSG_WAITALL)
                        return -EINVAL;
                if (req->opcode == IORING_OP_RECV && sr->len)
                        return -EINVAL;
                req->flags |= REQ_F_APOLL_MULTISHOT;
        }

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
        sr->done_io = 0;
        return 0;
}
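
/*
 * Multishot receive, as validated in io_recvmsg_prep() above: it requires
 * provided buffers (REQ_F_BUFFER_SELECT), refuses MSG_WAITALL, and for
 * IORING_OP_RECV the sqe length must be 0 because every retry picks a fresh
 * buffer. Each received message posts a CQE with IORING_CQE_F_MORE until the
 * request is stopped.
 */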
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);

        sr->done_io = 0;
        sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                                  unsigned int cflags, bool mshot_finished)
{
        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, *ret, cflags);
                *ret = IOU_OK;
                return true;
        }

        if (!mshot_finished) {
                if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
                                    cflags | IORING_CQE_F_MORE, false)) {
                        io_recv_prep_retry(req);
                        return false;
                }
                /*
                 * Otherwise stop multishot but use the current result.
                 * Probably will end up going into overflow, but this means
                 * we cannot trust the ordering anymore
                 */
        }

        io_req_set_res(req, *ret, cflags);

        if (req->flags & REQ_F_POLLED)
                *ret = IOU_STOP_MULTISHOT;
        else
                *ret = IOU_OK;
        return true;
}
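
/*
 * For multishot recvmsg the selected buffer is split up front (see
 * io_recvmsg_prep_multishot() below and io_recvmsg_multishot() after it):
 *
 *      struct io_uring_recvmsg_out | name[namelen] | control[controllen] | payload
 *
 * The user-supplied msghdr only provides the namelen/controllen bounds; the
 * actual data, including the fixed header, is written into the provided
 * buffer.
 */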
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
                                     struct io_sr_msg *sr, void __user **buf,
                                     size_t *len)
{
        unsigned long ubuf = (unsigned long) *buf;
        unsigned long hdr;

        hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
                kmsg->controllen;
        if (*len < hdr)
                return -EFAULT;

        if (kmsg->controllen) {
                unsigned long control = ubuf + hdr - kmsg->controllen;

                kmsg->msg.msg_control_user = (void *) control;
                kmsg->msg.msg_controllen = kmsg->controllen;
        }

        sr->buf = *buf; /* stash for later copy */
        *buf = (void *) (ubuf + hdr);
        kmsg->payloadlen = *len = *len - hdr;
        return 0;
}

struct io_recvmsg_multishot_hdr {
        struct io_uring_recvmsg_out msg;
        struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
                                struct io_async_msghdr *kmsg,
                                unsigned int flags, bool *finished)
{
        int err;
        int copy_len;
        struct io_recvmsg_multishot_hdr hdr;

        if (kmsg->namelen)
                kmsg->msg.msg_name = &hdr.addr;
        kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
        kmsg->msg.msg_namelen = 0;

        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;

        err = sock_recvmsg(sock, &kmsg->msg, flags);
        *finished = err <= 0;
        if (err < 0)
                return err;

        hdr.msg = (struct io_uring_recvmsg_out) {
                .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
                .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
        };

        hdr.msg.payloadlen = err;
        if (err > kmsg->payloadlen)
                err = kmsg->payloadlen;

        copy_len = sizeof(struct io_uring_recvmsg_out);
        if (kmsg->msg.msg_namelen > kmsg->namelen)
                copy_len += kmsg->namelen;
        else
                copy_len += kmsg->msg.msg_namelen;

        /*
         *      "fromlen shall refer to the value before truncation.."
         *                      1003.1g
         */
        hdr.msg.namelen = kmsg->msg.msg_namelen;

        /* ensure that there is no gap between hdr and sockaddr_storage */
        BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
                     sizeof(struct io_uring_recvmsg_out));
        if (copy_to_user(io->buf, &hdr, copy_len)) {
                *finished = true;
                return -EFAULT;
        }

        return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
                        kmsg->controllen + err;
}
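
/*
 * Note that the return value of io_recvmsg_multishot() is the number of
 * bytes written to the provided buffer (header + name + control + payload),
 * not just the payload length; io_recv_finish() then decides whether to post
 * IORING_CQE_F_MORE and loop back to retry_multishot.
 */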
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned int cflags;
        unsigned flags;
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        bool mshot_finished = true;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
        } else {
                ret = io_recvmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
                kmsg = &iomsg;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
        if (io_do_buffer_select(req)) {
                void __user *buf;
                size_t len = sr->len;

                buf = io_buffer_select(req, &len, issue_flags);
                if (!buf)
                        return -ENOBUFS;

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
                        if (ret) {
                                io_kbuf_recycle(req, issue_flags);
                                return ret;
                        }
                }

                kmsg->fast_iov[0].iov_base = buf;
                kmsg->fast_iov[0].iov_len = len;
                iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
                                len);
        }

        flags = sr->msg_flags;
        if (force_nonblock)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        kmsg->msg.msg_get_inq = 1;
        if (req->flags & REQ_F_APOLL_MULTISHOT)
                ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
                                           &mshot_finished);
        else
                ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
                                         kmsg->uaddr, flags);

        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock) {
                        ret = io_setup_async_msg(req, kmsg, issue_flags);
                        if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
                                               IO_APOLL_MULTI_POLLED) {
                                io_kbuf_recycle(req, issue_flags);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }
                        return ret;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                req_set_fail(req);
        }

        if (ret > 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        else
                io_kbuf_recycle(req, issue_flags);

        cflags = io_put_kbuf(req, issue_flags);
        if (kmsg->msg.msg_inq)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;

        if (!io_recv_finish(req, &ret, cflags, mshot_finished))
                goto retry_multishot;

        if (mshot_finished) {
                io_netmsg_recycle(req, issue_flags);
                /* fast path, check for non-NULL to avoid function call */
                if (kmsg->free_iov)
                        kfree(kmsg->free_iov);
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }

        return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req);
        struct msghdr msg;
        struct socket *sock;
        struct iovec iov;
        unsigned int cflags;
        unsigned flags;
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        size_t len = sr->len;

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

retry_multishot:
        if (io_do_buffer_select(req)) {
                void __user *buf;

                buf = io_buffer_select(req, &len, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                sr->buf = buf;
        }

        ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
        if (unlikely(ret))
                goto out_free;

        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = NULL;
        msg.msg_get_inq = 1;
        msg.msg_flags = 0;
        msg.msg_controllen = 0;
        msg.msg_iocb = NULL;
        msg.msg_ubuf = NULL;

        flags = sr->msg_flags;
        if (force_nonblock)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        ret = sock_recvmsg(sock, &msg, flags);
        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock) {
                        if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
                                io_kbuf_recycle(req, issue_flags);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }

                        return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->len -= ret;
                        sr->buf += ret;
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return -EAGAIN;
                }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
                req_set_fail(req);
        }

        if (ret > 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        else
                io_kbuf_recycle(req, issue_flags);

        cflags = io_put_kbuf(req, issue_flags);
        if (msg.msg_inq)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;

        if (!io_recv_finish(req, &ret, cflags, ret <= 0))
                goto retry_multishot;

        return ret;
}
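
/*
 * Zero-copy send: the user buffer pages are accounted and handed to the
 * network stack with MSG_ZEROCOPY semantics through msg_ubuf, and completion
 * of the actual transmission is reported via the notification attached to
 * the slot selected by sqe->notification_idx rather than by this request's
 * own CQE result.
 */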
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_sendzc *zc = io_kiocb_to_cmd(req);

        if (READ_ONCE(sqe->addr2) || READ_ONCE(sqe->__pad2[0]) ||
            READ_ONCE(sqe->addr3))
                return -EINVAL;

        zc->flags = READ_ONCE(sqe->ioprio);
        if (zc->flags & ~IORING_RECVSEND_POLL_FIRST)
                return -EINVAL;

        zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
        zc->len = READ_ONCE(sqe->len);
        zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        zc->slot_idx = READ_ONCE(sqe->notification_idx);
        if (zc->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
        return 0;
}

int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_sendzc *zc = io_kiocb_to_cmd(req);
        struct io_notif_slot *notif_slot;
        struct io_notif *notif;
        struct msghdr msg;
        struct iovec iov;
        struct socket *sock;
        unsigned msg_flags;
        int ret, min_ret = 0;

        if (!(req->flags & REQ_F_POLLED) &&
            (zc->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;

        if (issue_flags & IO_URING_F_UNLOCKED)
                return -EAGAIN;
        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
        if (!notif_slot)
                return -EINVAL;
        notif = io_get_notif(ctx, notif_slot);
        if (!notif)
                return -ENOMEM;

        msg.msg_name = NULL;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;

        ret = import_single_range(WRITE, zc->buf, zc->len, &iov, &msg.msg_iter);
        if (unlikely(ret))
                return ret;
        mm_account_pinned_pages(&notif->uarg.mmp, zc->len);

        msg_flags = zc->msg_flags | MSG_ZEROCOPY;
        if (issue_flags & IO_URING_F_NONBLOCK)
                msg_flags |= MSG_DONTWAIT;
        if (msg_flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        msg.msg_flags = msg_flags;
        msg.msg_ubuf = &notif->uarg;
        msg.sg_from_iter = NULL;
        ret = sock_sendmsg(sock, &msg);

        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return -EAGAIN;
                return ret == -ERESTARTSYS ? -EINTR : ret;
        }

        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_accept *accept = io_kiocb_to_cmd(req);
        unsigned flags;

        if (sqe->len || sqe->buf_index)
                return -EINVAL;

        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
        accept->nofile = rlimit(RLIMIT_NOFILE);
        flags = READ_ONCE(sqe->ioprio);
        if (flags & ~IORING_ACCEPT_MULTISHOT)
                return -EINVAL;

        accept->file_slot = READ_ONCE(sqe->file_index);
        if (accept->file_slot) {
                if (accept->flags & SOCK_CLOEXEC)
                        return -EINVAL;
                if (flags & IORING_ACCEPT_MULTISHOT &&
                    accept->file_slot != IORING_FILE_INDEX_ALLOC)
                        return -EINVAL;
        }
        if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
                return -EINVAL;
        if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
                accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
        if (flags & IORING_ACCEPT_MULTISHOT)
                req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
}
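
/*
 * With IORING_ACCEPT_MULTISHOT a single SQE keeps accepting connections:
 * every successful accept posts a CQE carrying the new fd (or fixed file
 * slot) with IORING_CQE_F_MORE set, and the loop below jumps back to the
 * retry label until an error occurs or the completion cannot be posted.
 */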
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_accept *accept = io_kiocb_to_cmd(req);
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
        bool fixed = !!accept->file_slot;
        struct file *file;
        int ret, fd;

retry:
        if (!fixed) {
                fd = __get_unused_fd_flags(accept->flags, accept->nofile);
                if (unlikely(fd < 0))
                        return fd;
        }
        file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
                         accept->flags);
        if (IS_ERR(file)) {
                if (!fixed)
                        put_unused_fd(fd);
                ret = PTR_ERR(file);
                if (ret == -EAGAIN && force_nonblock) {
                        /*
                         * if it's multishot and polled, we don't need to
                         * return EAGAIN to arm the poll infra since it
                         * has already been done
                         */
                        if ((req->flags & IO_APOLL_MULTI_POLLED) ==
                            IO_APOLL_MULTI_POLLED)
                                ret = IOU_ISSUE_SKIP_COMPLETE;
                        return ret;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        } else if (!fixed) {
                fd_install(fd, file);
                ret = fd;
        } else {
                ret = io_fixed_fd_install(req, issue_flags, file,
                                          accept->file_slot);
        }

        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, ret, 0);
                return IOU_OK;
        }

        if (ret >= 0 &&
            io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
                goto retry;

        io_req_set_res(req, ret, 0);
        if (req->flags & REQ_F_POLLED)
                return IOU_STOP_MULTISHOT;
        return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_socket *sock = io_kiocb_to_cmd(req);

        if (sqe->addr || sqe->rw_flags || sqe->buf_index)
                return -EINVAL;

        sock->domain = READ_ONCE(sqe->fd);
        sock->type = READ_ONCE(sqe->off);
        sock->protocol = READ_ONCE(sqe->len);
        sock->file_slot = READ_ONCE(sqe->file_index);
        sock->nofile = rlimit(RLIMIT_NOFILE);

        sock->flags = sock->type & ~SOCK_TYPE_MASK;
        if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
                return -EINVAL;
        if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
                return -EINVAL;
        return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_socket *sock = io_kiocb_to_cmd(req);
        bool fixed = !!sock->file_slot;
        struct file *file;
        int ret, fd;

        if (!fixed) {
                fd = __get_unused_fd_flags(sock->flags, sock->nofile);
                if (unlikely(fd < 0))
                        return fd;
        }
        file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
        if (IS_ERR(file)) {
                if (!fixed)
                        put_unused_fd(fd);
                ret = PTR_ERR(file);
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        } else if (!fixed) {
                fd_install(fd, file);
                ret = fd;
        } else {
                ret = io_fixed_fd_install(req, issue_flags, file,
                                          sock->file_slot);
        }
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
        struct io_async_connect *io = req->async_data;
        struct io_connect *conn = io_kiocb_to_cmd(req);

        return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_connect *conn = io_kiocb_to_cmd(req);

        if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;

        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        conn->addr_len = READ_ONCE(sqe->addr2);
        return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_connect *connect = io_kiocb_to_cmd(req);
        struct io_async_connect __io, *io;
        unsigned file_flags;
        int ret;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

        if (req_has_async_data(req)) {
                io = req->async_data;
        } else {
                ret = move_addr_to_kernel(connect->addr,
                                          connect->addr_len,
                                          &__io.address);
                if (ret)
                        goto out;
                io = &__io;
        }

        file_flags = force_nonblock ? O_NONBLOCK : 0;

        ret = __sys_connect_file(req->file, &io->address,
                                 connect->addr_len, file_flags);
        if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
                if (req_has_async_data(req))
                        return -EAGAIN;
                if (io_alloc_async_data(req)) {
                        ret = -ENOMEM;
                        goto out;
                }
                memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
out:
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
        kfree(container_of(entry, struct io_async_msghdr, cache));
}

#endif