// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#if defined(CONFIG_NET)
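/*
 * Per-opcode state for the network opcodes. Each struct lives in the
 * io_kiocb command area and is fetched with io_kiocb_to_cmd().
 */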
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
};

struct io_sendzc {
	struct file			*file;
	void __user			*buf;
	size_t				len;
	u16				slot_idx;
	unsigned			msg_flags;
	unsigned			flags;
	unsigned			addr_len;
	void __user			*addr;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
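/*
 * Only retry a short transfer for MSG_WAITALL, and only on socket types
 * where continuing a partial result is meaningful (stream/seqpacket).
 */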
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
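/*
 * Try to stash the async msghdr in the per-ring cache rather than freeing
 * it; only safe when the ring is locked (not IO_URING_F_UNLOCKED).
 */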
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
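/*
 * Grab an async msghdr, preferring a cached entry when the ring is locked
 * and falling back to a fresh allocation otherwise.
 */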
static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
						      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}
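/*
 * Copy the on-stack msghdr into async data so the request can be retried
 * later; returns -EAGAIN so the caller goes async, or -ENOMEM if the
 * allocation failed.
 */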
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	async_msg = io_recvmsg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, point it at the new copy */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}
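/*
 * Import the user msghdr for sendmsg. The iovec lands in fast_iov when it
 * fits; otherwise sendmsg_copy_msghdr() allocates one and free_iov owns it.
 */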
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
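/*
 * Issue path for IORING_OP_SENDMSG. On a short MSG_WAITALL send the
 * progress is stashed in sr->done_io and the request is requeued, so the
 * eventual completion reports the total bytes sent.
 */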
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
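/*
 * Issue path for IORING_OP_SEND: like io_sendmsg() but for a single
 * buffer, so the msghdr is built on the stack and no user msghdr has to
 * be copied in.
 */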
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
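/*
 * For multishot recvmsg the kernel-generated io_uring_recvmsg_out header
 * plus the (optional) name and control data are placed in front of the
 * payload, so their summed sizes must not overflow an int.
 */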
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
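/*
 * Multishot recv only works with provided buffers, cannot be combined
 * with MSG_WAITALL, and for IORING_OP_RECV requires a zero length so each
 * retry sizes itself from the selected buffer.
 */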
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}
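/*
 * Lay out the provided buffer for multishot recvmsg:
 *
 *   | io_uring_recvmsg_out | name (namelen) | control (controllen) | payload |
 *
 * The head of the buffer is reserved for the out-header, name and control
 * data; *buf and *len are advanced past that area so only the payload is
 * received into the remainder.
 */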
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}
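/*
 * Issue path for IORING_OP_RECV: the single-buffer variant, with the same
 * provided-buffer and multishot handling as io_recvmsg() above.
 */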
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}
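/*
 * Prep for IORING_OP_SEND_ZC. A zerocopy send completes via a
 * notification tied to the slot selected by sqe->notification_idx; the
 * data may come from plain user memory or a registered (fixed) buffer.
 */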
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sendzc *zc = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;

	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
		return -EINVAL;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(req, ctx, 0);
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	zc->slot_idx = READ_ONCE(sqe->notification_idx);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	zc->addr_len = READ_ONCE(sqe->addr_len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}
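/*
 * Fill an skb directly from a bvec iterator for managed zerocopy frags,
 * falling back to __zerocopy_sg_from_iter() when the skb or the iterator
 * cannot be handled that way.
 */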
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!shinfo->nr_frags)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;

	if (!skb_zcopy_managed(skb) || !iov_iter_is_bvec(from)) {
		skb_zcopy_downgrade_managed(skb);
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
	}

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count = bi.bi_size;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage address;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_sendzc *zc = io_kiocb_to_cmd(req);
	struct io_notif_slot *notif_slot;
	struct io_kiocb *notif;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return -EAGAIN;
	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
	if (!notif_slot)
		return -EINVAL;
	notif = io_get_notif(ctx, notif_slot);
	if (!notif)
		return -ENOMEM;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(notif, zc->len);
		if (unlikely(ret))
			return ret;
	}

	if (zc->addr) {
		ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
		if (unlikely(ret < 0))
			return ret;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = zc->addr_len;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
	msg.sg_from_iter = io_sg_from_iter;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		return ret == -ERESTARTSYS ? -EINTR : ret;
	}

	if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH)
		io_notif_slot_flush_submit(notif_slot, 0);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
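/*
 * IORING_OP_ACCEPT. With IORING_ACCEPT_MULTISHOT a single SQE keeps
 * posting one CQE per accepted connection until it fails or is cancelled;
 * multishot with fixed files requires IORING_FILE_INDEX_ALLOC.
 */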
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}
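/*
 * IORING_OP_SOCKET. The SOCK_NONBLOCK/SOCK_CLOEXEC style flags ride in
 * the non-type bits of sqe->off together with the socket type.
 */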
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
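/*
 * IORING_OP_CONNECT. The sockaddr is copied into kernel memory up front
 * (or on the first -EAGAIN/-EINPROGRESS), so retries do not depend on the
 * user's sockaddr still being stable.
 */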
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif