// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			msg_flags;
	unsigned			flags;
	size_t				len;
	size_t				done_io;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

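/*
 * The structs above are the per-opcode command state for the network
 * opcodes: each one overlays the SQE-derived command area of struct
 * io_kiocb and is fetched with io_kiocb_to_cmd() by the prep/issue
 * handlers below. IO_APOLL_MULTI_POLLED marks a multishot request that
 * already went through async poll, so -EAGAIN doesn't need to re-arm it.
 */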
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

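/*
 * A short transfer on a SOCK_STREAM or SOCK_SEQPACKET socket with
 * MSG_WAITALL set can be resumed later, so the send/recv handlers below
 * record partial progress in sr->done_io and retry rather than completing
 * with a short result.
 */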
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

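/*
 * Async msghdr state is kept in a small per-ring cache (ctx->netmsg_cache)
 * so retries don't always need a fresh allocation. The cache is only safe
 * to touch when the ring is locked, hence the IO_URING_F_UNLOCKED checks.
 */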
static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
						      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	async_msg = io_recvmsg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

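/*
 * io_sendmsg_copy_hdr() pulls the user msghdr (and its iovec, if it does
 * not fit in fast_iov) into kernel memory up front, so that an async retry
 * via io_setup_async_msg() can reuse the kernel copy.
 */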
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

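/*
 * Userspace side, shown only as a rough sketch (assumes liburing, not part
 * of this file): IORING_OP_SENDMSG takes the user msghdr pointer in
 * sqe->addr and the opcode flags in sqe->ioprio, e.g.
 *
 *	sqe = io_uring_get_sqe(ring);
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
 *	sqe->ioprio |= IORING_RECVSEND_POLL_FIRST;
 *
 * With IORING_RECVSEND_POLL_FIRST, io_sendmsg() below defers to poll via
 * io_setup_async_msg() instead of attempting the first transfer inline.
 */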
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

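/*
 * IORING_OP_SEND: same as io_sendmsg() but for a single buffer described
 * by sqe->addr/sqe->len, so no user msghdr has to be copied; partial
 * progress is tracked directly in sr->buf, sr->len and sr->done_io.
 */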
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

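/*
 * For multishot recvmsg the selected provided buffer is carved up as:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control | payload
 *
 * The helper below rejects header sizes whose sum overflows an int;
 * io_recvmsg_prep_multishot() then splits the buffer along this layout.
 */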
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

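/*
 * Userspace sketch for multishot receive, shown for illustration only
 * (assumes a liburing version with the multishot helpers and a registered
 * provided-buffer group; bgid below is a placeholder):
 *
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *
 * While the request stays armed each CQE carries IORING_CQE_F_MORE, plus
 * IORING_CQE_F_BUFFER with the id of the buffer that was consumed.
 */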
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

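/*
 * For a multishot recvmsg completion, cqe->res counts the whole region
 * written to the provided buffer: the io_uring_recvmsg_out header, the
 * space reserved for the name and control data, and the payload bytes.
 * liburing has parsing helpers for this layout (io_uring_recvmsg_validate()
 * and friends, assuming a recent enough version).
 */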
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

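/*
 * io_recvmsg() services IORING_OP_RECVMSG both in single-shot mode (via
 * __sys_recvmsg_sock()) and in multishot mode (via io_recvmsg_multishot()
 * above); retry_multishot loops back to pick a fresh provided buffer after
 * each successful multishot completion.
 */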
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

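/*
 * IORING_OP_RECV: single-buffer receive. Supports provided buffers and
 * multishot like io_recvmsg(), but without msghdr/control data handling.
 */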
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

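/*
 * Userspace sketch for multishot accept, shown for illustration only
 * (assumes liburing, not part of this file):
 *
 *	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
 *
 * A single SQE then produces one CQE per accepted connection, each flagged
 * with IORING_CQE_F_MORE until the request terminates (e.g. on error).
 */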
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

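/*
 * IORING_OP_SOCKET packs socket(2)'s arguments into otherwise unused SQE
 * fields: domain in sqe->fd, type in sqe->off and protocol in sqe->len,
 * with SOCK_CLOEXEC/SOCK_NONBLOCK carried in the type as usual.
 */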
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

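/*
 * A nonblocking connect may return -EINPROGRESS; in that case the
 * kernel-side sockaddr copy is stashed in async_data and the request is
 * retried (poll driven) until the connect resolves.
 */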
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif