// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			msg_flags;
	unsigned			flags;
	size_t				len;
	size_t				done_io;
};

struct io_sendzc {
	struct file			*file;
	void __user			*buf;
	size_t				len;
	u16				slot_idx;
	unsigned			msg_flags;
	unsigned			flags;
	unsigned			addr_len;
	void __user			*addr;
	size_t				done_io;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

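/*
 * IORING_OP_SHUTDOWN: sqe->len carries the SHUT_RD/SHUT_WR/SHUT_RDWR
 * argument. The issue handler never runs the operation from the
 * nonblocking path; it punts with -EAGAIN so __sys_shutdown_sock() is
 * only called from a context that may sleep.
 */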
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

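/*
 * With MSG_WAITALL, a short transfer on a stream or seqpacket socket is
 * worth retrying: the remainder can be picked up where the first pass
 * stopped. Datagram sockets are never retried, since a second call would
 * consume a different message.
 */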
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

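/*
 * A completed request can donate its async msghdr to the per-ring
 * ctx->netmsg_cache instead of freeing it, so the next sendmsg/recvmsg
 * that has to go async can reuse the allocation. Recycling is skipped
 * when the ring lock isn't held (IO_URING_F_UNLOCKED).
 */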
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

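/*
 * Move the on-stack msghdr state into req->async_data so the request can
 * be retried later (poll arm or io-wq). Returns -EAGAIN so the caller
 * requeues the request, or -ENOMEM if no async state could be set up.
 */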
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	async_msg = io_recvmsg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

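/*
 * send/sendmsg prep: sqe->addr points at the user_msghdr (or at the data
 * buffer for plain send), sqe->len is the length, sqe->msg_flags holds
 * the MSG_* flags and sqe->ioprio carries the io_uring-specific
 * IORING_RECVSEND_* flags. MSG_NOSIGNAL is always added.
 */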
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

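/*
 * Non-vectored send: the single user buffer is imported directly into
 * msg.msg_iter, so no async msghdr copy is needed. A short send with
 * MSG_WAITALL is retried with the progress so far tracked in sr->done_io.
 */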
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

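/*
 * For multishot recvmsg the kernel writes a struct io_uring_recvmsg_out
 * plus the (possibly truncated) name and control data in front of the
 * payload inside the selected buffer. Guard the header size calculation
 * against overflow of namelen + controllen.
 */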
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

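/*
 * Userspace sketch (an illustration only, assuming liburing >= 2.2 and a
 * provided-buffer group BGID registered beforehand): a multishot receive
 * keeps posting CQEs with IORING_CQE_F_MORE set until the buffer group
 * runs dry or the socket errors out.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *	io_uring_submit(&ring);
 */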
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

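/*
 * Layout of one multishot recvmsg completion inside the selected buffer:
 *
 *	struct io_uring_recvmsg_out | name bytes | control bytes | payload
 *
 * io_recvmsg_prep_multishot() reserves the header space up front and
 * shrinks *len accordingly; io_recvmsg_multishot() fills the header in
 * after sock_recvmsg() has run.
 */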
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

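/*
 * Zerocopy send: the pages of the user buffer are handed to the network
 * stack instead of being copied, so the buffer must stay untouched until
 * the notification tied to sqe->notification_idx signals that the data
 * has left the stack. IORING_RECVSEND_FIXED_BUF selects a registered
 * buffer, IORING_RECVSEND_NOTIF_FLUSH flushes the notification slot on
 * completion.
 */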
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
	struct io_ring_ctx *ctx = req->ctx;

	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
		return -EINVAL;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(req, ctx, 0);
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	zc->slot_idx = READ_ONCE(sqe->notification_idx);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	zc->addr_len = READ_ONCE(sqe->addr_len);
	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

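/*
 * ->sg_from_iter callback for the zerocopy path: map the bvec iterator
 * straight into skb frags without copying and without taking page
 * references (SKBFL_MANAGED_FRAG_REFS). Falls back to
 * __zerocopy_sg_from_iter() if the skb can't use managed frags or the
 * iterator isn't a bvec.
 */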
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!shinfo->nr_frags)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;

	if (!skb_zcopy_managed(skb) || !iov_iter_is_bvec(from)) {
		skb_zcopy_downgrade_managed(skb);
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
	}

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count = bi.bi_size;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage address;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
	struct io_notif_slot *notif_slot;
	struct io_kiocb *notif;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return -EAGAIN;
	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
	if (!notif_slot)
		return -EINVAL;
	notif = io_get_notif(ctx, notif_slot);
	if (!notif)
		return -ENOMEM;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(notif, zc->len);
		if (unlikely(ret))
			return ret;
	}

	if (zc->addr) {
		ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
		if (unlikely(ret < 0))
			return ret;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = zc->addr_len;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
	msg.sg_from_iter = io_sg_from_iter;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	} else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
		io_notif_slot_flush_submit(notif_slot, 0);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

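/*
 * Userspace sketch (an illustration only, assuming liburing >= 2.2): one
 * SQE accepts connections repeatedly; each CQE carries a new fd in
 * cqe->res and keeps IORING_CQE_F_MORE set while the request stays armed.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
 *	io_uring_submit(&ring);
 */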
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len =  READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif