/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/sched/clock.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"
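/*
 * rds_inc_init() prepares a freshly allocated rds_incoming before the
 * transport feeds it into the receive path.  The refcount starts at 1
 * and is owned by the caller; the receive queue takes its own reference
 * via rds_inc_addref() when the message is queued in rds_recv_incoming().
 */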
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = *saddr;
	inc->i_usercopy.rdma_cookie = 0;
	inc->i_usercopy.rx_tstamp = ktime_set(0, 0);

	memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
}
EXPORT_SYMBOL_GPL(rds_inc_init);
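/*
 * Per-path variant of rds_inc_init() for multipath-capable transports:
 * it additionally records the rds_conn_path so that reassembly and
 * sequence-number state stay per path.
 */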
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       struct in6_addr *saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = *saddr;
	inc->i_usercopy.rdma_cookie = 0;
	inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
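/*
 * An rds_incoming is refcounted: the receive queue and any reader each
 * hold a reference, and the final rds_inc_put() returns the buffer to
 * the transport through its ->inc_free() hook.
 */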
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	refcount_inc(&inc->i_refcount);
}

void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	if (refcount_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);
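/*
 * Track how many receive-queue bytes this socket holds and flip its bit
 * in the local congestion map when the socket crosses rds_sk_rcvbuf():
 * congested once rs_rcv_bytes exceeds the limit, uncongested again only
 * after draining below half of it (hysteresis, see below).
 */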
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	if (delta > 0)
		rds_stats_add(s_recv_bytes_added_to_socket, delta);
	else
		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);

	/* loop transport doesn't send/recv congestion updates */
	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
		return;

	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d "
		 "now_cong %d delta %d\n",
		 rs, &rs->rs_bound_addr,
		 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
		 rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs) / 2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}
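/*
 * The peer advertises a generation number in its handshake extension
 * headers.  A changed, non-zero generation means the peer (a TCP
 * transport) restarted: reset the per-path sequence counters and mark
 * everything still on the retransmit queues with RDS_MSG_FLUSH.
 */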
static void rds_conn_peer_gen_update(struct rds_connection *conn,
				     u32 peer_gen_num)
{
	int i;
	struct rds_message *rm, *tmp;
	unsigned long flags;

	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
	if (peer_gen_num != 0) {
		if (conn->c_peer_gen_num != 0 &&
		    peer_gen_num != conn->c_peer_gen_num) {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				struct rds_conn_path *cp;

				cp = &conn->c_path[i];
				spin_lock_irqsave(&cp->cp_lock, flags);
				cp->cp_next_tx_seq = 1;
				cp->cp_next_rx_seq = 0;
				list_for_each_entry_safe(rm, tmp,
							 &cp->cp_retrans,
							 m_conn_item)
					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
		conn->c_peer_gen_num = peer_gen_num;
	}
}
/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
			break;
		}
	}
}
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
		u32 rds_gen_num;
	} buffer;
	u32 new_peer_gen_num = 0;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       be16_to_cpu(buffer.rds_npaths));
			break;
		case RDS_EXTHDR_GEN_NUM:
			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type 0x%x\n",
					    type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single path */
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
	conn->c_ping_triggered = 0;
	rds_conn_peer_gen_update(conn, new_peer_gen_num);
}
/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on the first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths).
 * 2. The receiver of the probe-ping knows mprds_paths = min(s_npaths,
 *    r_npaths).  It sends back a probe-pong with r_npaths.  After that, if
 *    the receiver is the smaller IP addr, it starts
 *    rds_conn_path_connect_if_down on all mprds_paths.
 * 3. The sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller IP addr): just call
 *    rds_conn_path_connect_if_down on the hashed path.  (See rule 4.)
 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
 * 5. The sender may end up queuing the packet on the cp; it will be sent out
 *    later, when the connection is completed.
 *
 * A worked example follows the function below.
 */
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 &&
	    rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) {
		for (i = 0; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}
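/*
 * Worked example of the rules above (addresses are hypothetical):
 * 192.0.2.1 sends a probe-ping with s_npaths to 192.0.2.2, which replies
 * with a probe-pong carrying r_npaths; both ends settle on
 * min(s_npaths, r_npaths) paths, capped at RDS_MPATH_WORKERS.  Because
 * 192.0.2.1 < 192.0.2.2, only 192.0.2.1 calls
 * rds_conn_path_connect_if_down() on every path once the pong arrives;
 * 192.0.2.2 waits for the inbound connects, per rule 4.
 */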
/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow, which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, which only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);
	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we'd have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 *
	 * (A concrete failover example follows rds_recv_incoming() below.)
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from %pI6c\n",
				 saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
		/* if this is a handshake ping, start multipath if necessary */
		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
				 be16_to_cpu(inc->i_hdr.h_dport))) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}

	if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
		/* if this is a handshake pong, start multipath if necessary */
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}
	rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			inc->i_usercopy.rx_tstamp = ktime_get_real();

		rds_inc_addref(inc);
		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);
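/*
 * Concrete example of the retransmit rule above (hypothetical numbers):
 * after a conn failover the peer resends messages 5..7 down the new conn
 * instance while cp_next_rx_seq is already 8.  Those arrive with
 * RDS_FLAG_RETRANSMITTED set and an old sequence number, so they are
 * counted in s_recv_drop_old_seq and dropped rather than being mistaken
 * for a hole in the fragment stream.
 */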
/*
 * Be very careful here.  This is called as the condition in
 * wait_event_*(), so it needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming, i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}
	return *inc != NULL;
}
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}
/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg;
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	memset(&cmsg, 0, sizeof(cmsg));	/* fill holes with zero */

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can
	 * stuff in the user-provided cmsg buffer. We don't try to copy more,
	 * to avoid losing notifications - except when the buffer is so small
	 * that it wouldn't even hold a single notification. Then we give the
	 * caller as much of this single msg as we can squeeze in, and set
	 * MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				      struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}
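/*
 * A minimal user-space sketch of draining these notifications (not part
 * of this file; assumes a bound RDS socket fd and the uapi definitions
 * from <linux/rds.h>):
 *
 *	char ctl[CMSG_SPACE(sizeof(struct rds_rdma_notify)) * 8];
 *	struct msghdr mh = { .msg_control = ctl,
 *			     .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *c;
 *	struct rds_rdma_notify n;
 *
 *	if (recvmsg(fd, &mh, 0) < 0)
 *		return;
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
 *		if (c->cmsg_level == SOL_RDS &&
 *		    c->cmsg_type == RDS_CMSG_RDMA_STATUS) {
 *			memcpy(&n, CMSG_DATA(c), sizeof(n));
 *			handle_rdma_status(n.user_token, n.status);
 *		}
 *
 * handle_rdma_status() is a hypothetical application callback.
 */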
/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
		       sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}
/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_usercopy.rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
			       sizeof(inc->i_usercopy.rdma_cookie),
			       &inc->i_usercopy.rdma_cookie);
		if (ret)
			goto out;
	}

	if ((inc->i_usercopy.rx_tstamp != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		struct __kernel_old_timeval tv =
			ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp);

		if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) {
			ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
				       sizeof(tv), &tv);
		} else {
			struct __kernel_sock_timeval sk_tv;

			sk_tv.tv_sec = tv.tv_sec;
			sk_tv.tv_usec = tv.tv_usec;

			ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
				       sizeof(sk_tv), &sk_tv);
		}

		if (ret)
			goto out;
	}

	if (rs->rs_rx_traces) {
		struct rds_cmsg_rx_trace t;
		int i, j;

		memset(&t, 0, sizeof(t));
		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
			j = rs->rs_rx_trace[i];
			t.rx_trace_pos[i] = j;
			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
					inc->i_rx_lat_trace[j];
		}

		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
			       sizeof(t), &t);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
{
	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
	struct rds_msg_zcopy_info *info = NULL;
	struct rds_zcopy_cookies *done;
	unsigned long flags;

	if (!msg->msg_control)
		return false;

	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
		return false;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->zcookie_head)) {
		info = list_entry(q->zcookie_head.next,
				  struct rds_msg_zcopy_info, rs_zcookie_next);
		list_del(&info->rs_zcookie_next);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	if (!info)
		return false;
	done = &info->zcookies;
	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
		     done)) {
		spin_lock_irqsave(&q->lock, flags);
		list_add(&info->rs_zcookie_next, &q->zcookie_head);
		spin_unlock_irqrestore(&q->lock, flags);
		return false;
	}
	kfree(info);
	return true;
}
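/*
 * Main receive entry point.  The loop below first delivers any pending
 * RDMA/congestion notifications, then either returns a queued incoming
 * message or sleeps in wait_event_interruptible_timeout() until one
 * arrives.  MSG_PEEK leaves the message on rs_recv_queue; a raced
 * delivery (another thread dequeued the message we just copied) reverts
 * the iterator and retries.
 */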
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;
	if (msg_flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
	while (1) {
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				bool reaped = rds_recvmsg_zcookie(rs, msg);

				ret = reaped ? 0 : -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}
		rdsdebug("copying inc %p from %pI6c:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			iov_iter_revert(&msg->msg_iter, ret);
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			break;
		}
		rds_recvmsg_zcookie(rs, msg);

		rds_stats_inc(s_recv_delivered);

		if (msg->msg_name) {
			if (ipv6_addr_v4mapped(&inc->i_saddr)) {
				sin->sin_family = AF_INET;
				sin->sin_port = inc->i_hdr.h_sport;
				sin->sin_addr.s_addr =
				    inc->i_saddr.s6_addr32[3];
				memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
				msg->msg_namelen = sizeof(*sin);
			} else {
				sin6->sin6_family = AF_INET6;
				sin6->sin6_port = inc->i_hdr.h_sport;
				sin6->sin6_addr = inc->i_saddr;
				sin6->sin6_flowinfo = 0;
				sin6->sin6_scope_id = rs->rs_bound_scope_id;
				msg->msg_namelen = sizeof(*sin6);
			}
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}
/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);
	minfo.tos = inc->i_conn->c_tos;

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}
#if IS_ENABLED(CONFIG_IPV6)
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip)
{
	struct rds6_info_message minfo6;

	minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
	minfo6.tos = inc->i_conn->c_tos;

	if (flip) {
		minfo6.laddr = *daddr;
		minfo6.faddr = *saddr;
		minfo6.lport = inc->i_hdr.h_dport;
		minfo6.fport = inc->i_hdr.h_sport;
	} else {
		minfo6.laddr = *saddr;
		minfo6.faddr = *daddr;
		minfo6.lport = inc->i_hdr.h_sport;
		minfo6.fport = inc->i_hdr.h_dport;
	}

	minfo6.flags = 0;

	rds_info_copy(iter, &minfo6, sizeof(minfo6));
}
#endif