/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>

#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal functions. */
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
/* Keep track of the new idr low so that we don't re-use association id
 * numbers too fast.  It is protected by the idr spin lock and is in the
 * range of 1 - INT_MAX.
 */
static u32 idr_low = 1;
/* 1st Level Abstractions.  */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;
	/* Retrieve the SCTP per socket area.  */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here.  */
	asoc->ep = (struct sctp_endpoint *)ep;
	sctp_endpoint_hold(asoc->ep);

	/* Hold the sock.  */
	asoc->base.sk = (struct sock *)sk;
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure.  */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields.  */
	atomic_set(&asoc->base.refcnt, 1);
	asoc->base.dead = false;
	asoc->base.malloced = 0;

	/* Initialize the bind addr area.  */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	/* Set these values from the socket values, a conversion between
	 * milliseconds to seconds/microseconds must also be done.
	 */
	asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
	asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
					* 1000;
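	/* For illustration (this comment is not in the original source):
	 * a socket value of sasoc_cookie_life = 60500 ms splits into
	 * tv_sec = 60 and tv_usec = 500 * 1000 = 500000.
	 */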
	asoc->frag_point = 0;
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans  = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	asoc->overall_error_count = 0;
	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;
	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;
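	/* For illustration (not in the original source): with the common
	 * RTO.Max default of 60 seconds, the T5 shutdown guard timer is
	 * armed for 5 * 60 = 300 seconds.
	 */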
	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
		min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
				(unsigned long)asoc);
	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Allocate storage for the ssnmap after the inbound and outbound
	 * streams have been negotiated during Init.
	 */
	asoc->ssnmap = NULL;
	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960 6.1 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;
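	/* For illustration (not in the original source): with a typical
	 * default sk_rcvbuf of 212992 bytes this yields rwnd = 106496,
	 * while a very small receive buffer is clamped up to
	 * SCTP_DEFAULT_MINWINDOW (1500 bytes).
	 */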
	asoc->a_rwnd = asoc->rwnd;

	asoc->rwnd_over = 0;
	asoc->rwnd_press = 0;
	/* Use my own max window until I learn something better.  */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Set the sndbuf size for transmit.  */
	asoc->sndbuf_used = 0;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);
	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
	asoc->c.peer_vtag = 0;
	asoc->c.my_ttag   = 0;
	asoc->c.peer_ttag = 0;
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
	asoc->unack_data = 0;
	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses.  */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
	asoc->peer.transport_count = 0;
	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_cnt = 0;
	asoc->peer.sack_generation = 1;
	/* Assume that the peer will tell us if he recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	asoc->peer.asconf_capable = 0;
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;
	asoc->asconf_addr_del_pending = NULL;
	asoc->src_out_of_asoc_ok = 0;
	asoc->new_transport = NULL;
	/* Create an input queue.  */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue.  */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));

	asoc->need_ecne = 0;

	asoc->assoc_id = 0;
	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->autoclose = sp->autoclose;

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;
	/* SCTP_GET_ASSOC_STATS COUNTERS */
	memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;
	asoc->asoc_shared_key = NULL;
	asoc->default_hmac_id = 0;
	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);
	return NULL;
}
/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					 const struct sock *sk,
					 sctp_scope_t scope,
					 gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = t_new(struct sctp_association, gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	asoc->base.malloced = 1;
	SCTP_DBG_OBJCNT_INC(assoc);
	SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}
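/* Illustrative usage (a sketch, not part of the original file): a caller
 * such as the state machine side-effect code would pair the constructor
 * with its teardown, e.g.
 *
 *	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 *	...
 *	sctp_association_free(asoc);
 *
 * sctp_association_free() releases the construction reference via
 * sctp_association_put(); sctp_association_destroy() only runs once the
 * last holder drops its reference.
 */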
/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!asoc->temp) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}
	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue.  */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer.  */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free ssnmap storage.  */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list.  */
	sctp_bind_addr_free(&asoc->base.bind_addr);
	/* Do we need to go through all of our timers and
	 * delete them?   To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);
	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted; kfree(NULL) is a no-op,
	 * so no NULL check is needed.
	 */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}
/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	if (asoc->base.malloced) {
		kfree(asoc);
		SCTP_DBG_OBJCNT_DEC(assoc);
	}
}
/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}
/* Remove a transport from an association.  */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head	*pos;
	struct sctp_transport	*transport;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&peer->ipaddr),
				 ntohs(peer->ipaddr.v4.sin_port));

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}
/* Add a transport address to an association.  */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
				 " port: %d state:%d\n",
				 asoc,
				 addr,
				 port,
				 peer_state);

	/* Set the port if it has not been set yet.  */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN)
			peer->state = SCTP_ACTIVE;
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans.  */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pathmtu);
	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
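	/* For illustration (not in the original source): with the common
	 * Ethernet PMTU of 1500, this yields
	 * min(4*1500, max(2*1500, 4380)) = min(6000, 4380) = 4380 bytes.
	 */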
	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc.  */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one.  */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
/* Delete a transport address from an association.  */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head	*pos;
	struct list_head	*temp;
	struct sctp_transport	*transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}
/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}
/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport	*temp;
	struct sctp_transport	*t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}
/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_transport *t = NULL;
	struct sctp_transport *first;
	struct sctp_transport *second;
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport.  */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
	 * user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);
		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */

	/* Look for the two most recently used active transports.
	 *
	 * This code produces the wrong ordering whenever jiffies
	 * rolls over, but we still get usable transports, so we don't
	 * worry about it.
	 */
	first = NULL; second = NULL;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if ((t->state == SCTP_INACTIVE) ||
		    (t->state == SCTP_UNCONFIRMED) ||
		    (t->state == SCTP_PF))
			continue;
		if (!first || t->last_time_heard > first->last_time_heard) {
			second = first;
			first = t;
		}
		if (!second || t->last_time_heard > second->last_time_heard)
			second = t;
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the
	 * primary path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source
	 * transport address) to use.
	 *
	 * [If the primary is active but not most recent, bump the most
	 * recently used transport.]
	 */
	if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
	     (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
	    first != asoc->peer.primary_path) {
		second = first;
		first = asoc->peer.primary_path;
	}

	/* If we failed to find a usable transport, just camp on the
	 * primary, even if it is inactive.
	 */
	if (!first) {
		first = asoc->peer.primary_path;
		second = asoc->peer.primary_path;
	}

	/* Set the active and retran transports.  */
	asoc->peer.active_path = first;
	asoc->peer.retran_path = second;
}
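/* Illustrative walk-through (not in the original source): with three peer
 * addresses A (primary, ACTIVE), B (ACTIVE, heard most recently) and
 * C (INACTIVE), the scan above picks first = B and second = A; the primary
 * bump then swaps them, so the result is active_path = A, retran_path = B.
 */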
/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}
/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;
	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}
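/* For illustration (not in the original source): because next_tsn is an
 * unsigned 32-bit value, the wrap is automatic; after handing out
 * TSN 0xffffffff, the increment overflows and the next call returns 0.
 */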
/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}
/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	struct sctp_chunk *chunk;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	if (asoc->need_ecne)
		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
	else
		chunk = NULL;

	return chunk;
}
/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.   Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			transports) {

		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}
/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	transport = NULL;

out:
	return transport;
}
/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}
/* This routine moves an association from its old sk to a new sk.  */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock.  */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint.  */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock.  */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations.  */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}
/* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
/* Update the retran path for sending a retransmitted packet.
 * Round-robin through the active transports, else round-robin
 * through the inactive transports as this is the next best thing
 * we can try.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
	struct list_head *pos;

	if (asoc->peer.transport_count == 1)
		return;

	/* Find the next transport in a round-robin fashion. */
	t = asoc->peer.retran_path;
	pos = &t->transports;
	next = NULL;

	while (1) {
		/* Skip the head. */
		if (pos->next == head)
			pos = head->next;
		else
			pos = pos->next;

		t = list_entry(pos, struct sctp_transport, transports);

		/* We have exhausted the list, but didn't find any
		 * other active transports.  If so, use the next
		 * transport.
		 */
		if (t == asoc->peer.retran_path) {
			t = next;
			break;
		}

		/* Try to find an active transport. */

		if ((t->state == SCTP_ACTIVE) ||
		    (t->state == SCTP_UNKNOWN)) {
			break;
		} else {
			/* Keep track of the next transport in case
			 * we don't find any active transport.
			 */
			if (t->state != SCTP_UNCONFIRMED && !next)
				next = t;
		}
	}

	if (t)
		asoc->peer.retran_path = t;
	else
		t = asoc->peer.retran_path;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
				 " %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&t->ipaddr),
				 ntohs(t->ipaddr.v4.sin_port));
}
/* Choose the transport for sending a retransmitted packet. */
struct sctp_transport *sctp_assoc_choose_alter_transport(
	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (!last_sent_to)
		return asoc->peer.active_path;
	else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);
		return asoc->peer.retran_path;
	}
}
/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
				transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	if (pmtu) {
		asoc->pathmtu = pmtu;
		asoc->frag_point = sctp_frag_point(asoc, pmtu);
	}

	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
			  __func__, asoc, asoc->pathmtu, asoc->frag_point);
}
/* Should we send a SACK to update our peer? */
static inline int sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return 1;
		break;
	default:
		break;
	}
	return 0;
}
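/* Worked example (illustrative, not in the original source): with
 * sk_rcvbuf = 200000, rwnd_upd_shift = 4 and pathmtu = 1500, the update
 * threshold is max(200000 >> 4, 1500) = max(12500, 1500) = 12500, so a
 * window-update SACK is warranted once rwnd exceeds a_rwnd by 12500 bytes.
 */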
/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
			  "- %u\n", __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;
		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
				  "rwnd: %u a_rwnd: %u\n", __func__,
				  asoc, asoc->rwnd, asoc->a_rwnd);
		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}
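/* Illustrative numbers (not in the original source): if a previous
 * shortfall left rwnd_over = 3000 and 5000 bytes are now returned, the
 * first 3000 repay the overflow and rwnd grows by the remaining 2000.
 */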
/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
	SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to its original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}
	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
			  __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->rwnd_press);
}
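/* Illustrative numbers (not in the original source): with rwnd = 1000 and
 * len = 1500, the window is exhausted: rwnd becomes 0 and the excess 500
 * bytes are recorded in rwnd_over for later restoration by
 * sctp_assoc_rwnd_increase().
 */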
/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}
/* Build the association's bind address list from the cookie.  */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}
/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}
/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfp & __GFP_WAIT;
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid id, idr_low is always >= 1 */
	ret = idr_alloc(&sctp_assocs_id, asoc, idr_low, 0, GFP_NOWAIT);
	if (ret >= 0) {
		idr_low = ret + 1;
		if (idr_low == INT_MAX)
			idr_low = 1;
	}
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}
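/* Illustrative behaviour (not in the original source): idr_alloc() hands
 * out the lowest free id at or above idr_low, so after an allocation
 * returns e.g. 5, idr_low advances to 6; once it reaches INT_MAX it wraps
 * back to 1 rather than to 0, since 0 is not a valid association id.
 */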
/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}
/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}
/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}
void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}