2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 267674 2014-06-20 13:26:49Z tuexen $");
38 #include <netinet/sctp_os.h>
42 #include <netinet/sctp_var.h>
43 #include <netinet/sctp_sysctl.h>
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_pcb.h>
46 #include <netinet/sctputil.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctputil.h>
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_timer.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_indata.h>
54 #include <netinet/sctp_bsd_addr.h>
55 #include <netinet/sctp_input.h>
56 #include <netinet/sctp_crc32.h>
57 #if defined(__Userspace_os_Linux)
58 #define __FAVOR_BSD /* (on Ubuntu at least) enables BSD-style UDP header field names, as in RFC 768 */
60 #if defined INET || defined INET6
61 #if !defined(__Userspace_os_Windows)
62 #include <netinet/udp.h>
65 #if defined(__APPLE__)
66 #include <netinet/in.h>
68 #if defined(__FreeBSD__)
69 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
70 #include <netinet/udp_var.h>
72 #include <machine/in_cksum.h>
74 #if defined(__Userspace__) && defined(INET6)
75 #include <netinet6/sctp6_var.h>
78 #if defined(__APPLE__)
79 #define APPLE_FILE_NO 3
82 #if defined(__APPLE__)
83 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
84 #define SCTP_MAX_LINKHDR 16
88 #define SCTP_MAX_GAPS_INARRAY 4
90 uint8_t right_edge; /* mergeable on the right edge */
91 uint8_t left_edge; /* mergeable on the left edge */
94 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
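/*
 * Lookup table used when building SACK chunks: each possible value of a
 * byte taken from the receive mapping array indexes one entry below.  An
 * entry records whether the byte's gaps can merge with the neighbouring
 * byte on the right or left edge, how many gap-ack blocks the byte
 * contributes, and the start/end offsets of each block within the byte.
 */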
97 struct sack_track sack_array[256] = {
98 {0, 0, 0, 0, /* 0x00 */
105 {1, 0, 1, 0, /* 0x01 */
112 {0, 0, 1, 0, /* 0x02 */
119 {1, 0, 1, 0, /* 0x03 */
126 {0, 0, 1, 0, /* 0x04 */
133 {1, 0, 2, 0, /* 0x05 */
140 {0, 0, 1, 0, /* 0x06 */
147 {1, 0, 1, 0, /* 0x07 */
154 {0, 0, 1, 0, /* 0x08 */
161 {1, 0, 2, 0, /* 0x09 */
168 {0, 0, 2, 0, /* 0x0a */
175 {1, 0, 2, 0, /* 0x0b */
182 {0, 0, 1, 0, /* 0x0c */
189 {1, 0, 2, 0, /* 0x0d */
196 {0, 0, 1, 0, /* 0x0e */
203 {1, 0, 1, 0, /* 0x0f */
210 {0, 0, 1, 0, /* 0x10 */
217 {1, 0, 2, 0, /* 0x11 */
224 {0, 0, 2, 0, /* 0x12 */
231 {1, 0, 2, 0, /* 0x13 */
238 {0, 0, 2, 0, /* 0x14 */
245 {1, 0, 3, 0, /* 0x15 */
252 {0, 0, 2, 0, /* 0x16 */
259 {1, 0, 2, 0, /* 0x17 */
266 {0, 0, 1, 0, /* 0x18 */
273 {1, 0, 2, 0, /* 0x19 */
280 {0, 0, 2, 0, /* 0x1a */
287 {1, 0, 2, 0, /* 0x1b */
294 {0, 0, 1, 0, /* 0x1c */
301 {1, 0, 2, 0, /* 0x1d */
308 {0, 0, 1, 0, /* 0x1e */
315 {1, 0, 1, 0, /* 0x1f */
322 {0, 0, 1, 0, /* 0x20 */
329 {1, 0, 2, 0, /* 0x21 */
336 {0, 0, 2, 0, /* 0x22 */
343 {1, 0, 2, 0, /* 0x23 */
350 {0, 0, 2, 0, /* 0x24 */
357 {1, 0, 3, 0, /* 0x25 */
364 {0, 0, 2, 0, /* 0x26 */
371 {1, 0, 2, 0, /* 0x27 */
378 {0, 0, 2, 0, /* 0x28 */
385 {1, 0, 3, 0, /* 0x29 */
392 {0, 0, 3, 0, /* 0x2a */
399 {1, 0, 3, 0, /* 0x2b */
406 {0, 0, 2, 0, /* 0x2c */
413 {1, 0, 3, 0, /* 0x2d */
420 {0, 0, 2, 0, /* 0x2e */
427 {1, 0, 2, 0, /* 0x2f */
434 {0, 0, 1, 0, /* 0x30 */
441 {1, 0, 2, 0, /* 0x31 */
448 {0, 0, 2, 0, /* 0x32 */
455 {1, 0, 2, 0, /* 0x33 */
462 {0, 0, 2, 0, /* 0x34 */
469 {1, 0, 3, 0, /* 0x35 */
476 {0, 0, 2, 0, /* 0x36 */
483 {1, 0, 2, 0, /* 0x37 */
490 {0, 0, 1, 0, /* 0x38 */
497 {1, 0, 2, 0, /* 0x39 */
504 {0, 0, 2, 0, /* 0x3a */
511 {1, 0, 2, 0, /* 0x3b */
518 {0, 0, 1, 0, /* 0x3c */
525 {1, 0, 2, 0, /* 0x3d */
532 {0, 0, 1, 0, /* 0x3e */
539 {1, 0, 1, 0, /* 0x3f */
546 {0, 0, 1, 0, /* 0x40 */
553 {1, 0, 2, 0, /* 0x41 */
560 {0, 0, 2, 0, /* 0x42 */
567 {1, 0, 2, 0, /* 0x43 */
574 {0, 0, 2, 0, /* 0x44 */
581 {1, 0, 3, 0, /* 0x45 */
588 {0, 0, 2, 0, /* 0x46 */
595 {1, 0, 2, 0, /* 0x47 */
602 {0, 0, 2, 0, /* 0x48 */
609 {1, 0, 3, 0, /* 0x49 */
616 {0, 0, 3, 0, /* 0x4a */
623 {1, 0, 3, 0, /* 0x4b */
630 {0, 0, 2, 0, /* 0x4c */
637 {1, 0, 3, 0, /* 0x4d */
644 {0, 0, 2, 0, /* 0x4e */
651 {1, 0, 2, 0, /* 0x4f */
658 {0, 0, 2, 0, /* 0x50 */
665 {1, 0, 3, 0, /* 0x51 */
672 {0, 0, 3, 0, /* 0x52 */
679 {1, 0, 3, 0, /* 0x53 */
686 {0, 0, 3, 0, /* 0x54 */
693 {1, 0, 4, 0, /* 0x55 */
700 {0, 0, 3, 0, /* 0x56 */
707 {1, 0, 3, 0, /* 0x57 */
714 {0, 0, 2, 0, /* 0x58 */
721 {1, 0, 3, 0, /* 0x59 */
728 {0, 0, 3, 0, /* 0x5a */
735 {1, 0, 3, 0, /* 0x5b */
742 {0, 0, 2, 0, /* 0x5c */
749 {1, 0, 3, 0, /* 0x5d */
756 {0, 0, 2, 0, /* 0x5e */
763 {1, 0, 2, 0, /* 0x5f */
770 {0, 0, 1, 0, /* 0x60 */
777 {1, 0, 2, 0, /* 0x61 */
784 {0, 0, 2, 0, /* 0x62 */
791 {1, 0, 2, 0, /* 0x63 */
798 {0, 0, 2, 0, /* 0x64 */
805 {1, 0, 3, 0, /* 0x65 */
812 {0, 0, 2, 0, /* 0x66 */
819 {1, 0, 2, 0, /* 0x67 */
826 {0, 0, 2, 0, /* 0x68 */
833 {1, 0, 3, 0, /* 0x69 */
840 {0, 0, 3, 0, /* 0x6a */
847 {1, 0, 3, 0, /* 0x6b */
854 {0, 0, 2, 0, /* 0x6c */
861 {1, 0, 3, 0, /* 0x6d */
868 {0, 0, 2, 0, /* 0x6e */
875 {1, 0, 2, 0, /* 0x6f */
882 {0, 0, 1, 0, /* 0x70 */
889 {1, 0, 2, 0, /* 0x71 */
896 {0, 0, 2, 0, /* 0x72 */
903 {1, 0, 2, 0, /* 0x73 */
910 {0, 0, 2, 0, /* 0x74 */
917 {1, 0, 3, 0, /* 0x75 */
924 {0, 0, 2, 0, /* 0x76 */
931 {1, 0, 2, 0, /* 0x77 */
938 {0, 0, 1, 0, /* 0x78 */
945 {1, 0, 2, 0, /* 0x79 */
952 {0, 0, 2, 0, /* 0x7a */
959 {1, 0, 2, 0, /* 0x7b */
966 {0, 0, 1, 0, /* 0x7c */
973 {1, 0, 2, 0, /* 0x7d */
980 {0, 0, 1, 0, /* 0x7e */
987 {1, 0, 1, 0, /* 0x7f */
994 {0, 1, 1, 0, /* 0x80 */
1001 {1, 1, 2, 0, /* 0x81 */
1008 {0, 1, 2, 0, /* 0x82 */
1015 {1, 1, 2, 0, /* 0x83 */
1022 {0, 1, 2, 0, /* 0x84 */
1029 {1, 1, 3, 0, /* 0x85 */
1036 {0, 1, 2, 0, /* 0x86 */
1043 {1, 1, 2, 0, /* 0x87 */
1050 {0, 1, 2, 0, /* 0x88 */
1057 {1, 1, 3, 0, /* 0x89 */
1064 {0, 1, 3, 0, /* 0x8a */
1071 {1, 1, 3, 0, /* 0x8b */
1078 {0, 1, 2, 0, /* 0x8c */
1085 {1, 1, 3, 0, /* 0x8d */
1092 {0, 1, 2, 0, /* 0x8e */
1099 {1, 1, 2, 0, /* 0x8f */
1106 {0, 1, 2, 0, /* 0x90 */
1113 {1, 1, 3, 0, /* 0x91 */
1120 {0, 1, 3, 0, /* 0x92 */
1127 {1, 1, 3, 0, /* 0x93 */
1134 {0, 1, 3, 0, /* 0x94 */
1141 {1, 1, 4, 0, /* 0x95 */
1148 {0, 1, 3, 0, /* 0x96 */
1155 {1, 1, 3, 0, /* 0x97 */
1162 {0, 1, 2, 0, /* 0x98 */
1169 {1, 1, 3, 0, /* 0x99 */
1176 {0, 1, 3, 0, /* 0x9a */
1183 {1, 1, 3, 0, /* 0x9b */
1190 {0, 1, 2, 0, /* 0x9c */
1197 {1, 1, 3, 0, /* 0x9d */
1204 {0, 1, 2, 0, /* 0x9e */
1211 {1, 1, 2, 0, /* 0x9f */
1218 {0, 1, 2, 0, /* 0xa0 */
1225 {1, 1, 3, 0, /* 0xa1 */
1232 {0, 1, 3, 0, /* 0xa2 */
1239 {1, 1, 3, 0, /* 0xa3 */
1246 {0, 1, 3, 0, /* 0xa4 */
1253 {1, 1, 4, 0, /* 0xa5 */
1260 {0, 1, 3, 0, /* 0xa6 */
1267 {1, 1, 3, 0, /* 0xa7 */
1274 {0, 1, 3, 0, /* 0xa8 */
1281 {1, 1, 4, 0, /* 0xa9 */
1288 {0, 1, 4, 0, /* 0xaa */
1295 {1, 1, 4, 0, /* 0xab */
1302 {0, 1, 3, 0, /* 0xac */
1309 {1, 1, 4, 0, /* 0xad */
1316 {0, 1, 3, 0, /* 0xae */
1323 {1, 1, 3, 0, /* 0xaf */
1330 {0, 1, 2, 0, /* 0xb0 */
1337 {1, 1, 3, 0, /* 0xb1 */
1344 {0, 1, 3, 0, /* 0xb2 */
1351 {1, 1, 3, 0, /* 0xb3 */
1358 {0, 1, 3, 0, /* 0xb4 */
1365 {1, 1, 4, 0, /* 0xb5 */
1372 {0, 1, 3, 0, /* 0xb6 */
1379 {1, 1, 3, 0, /* 0xb7 */
1386 {0, 1, 2, 0, /* 0xb8 */
1393 {1, 1, 3, 0, /* 0xb9 */
1400 {0, 1, 3, 0, /* 0xba */
1407 {1, 1, 3, 0, /* 0xbb */
1414 {0, 1, 2, 0, /* 0xbc */
1421 {1, 1, 3, 0, /* 0xbd */
1428 {0, 1, 2, 0, /* 0xbe */
1435 {1, 1, 2, 0, /* 0xbf */
1442 {0, 1, 1, 0, /* 0xc0 */
1449 {1, 1, 2, 0, /* 0xc1 */
1456 {0, 1, 2, 0, /* 0xc2 */
1463 {1, 1, 2, 0, /* 0xc3 */
1470 {0, 1, 2, 0, /* 0xc4 */
1477 {1, 1, 3, 0, /* 0xc5 */
1484 {0, 1, 2, 0, /* 0xc6 */
1491 {1, 1, 2, 0, /* 0xc7 */
1498 {0, 1, 2, 0, /* 0xc8 */
1505 {1, 1, 3, 0, /* 0xc9 */
1512 {0, 1, 3, 0, /* 0xca */
1519 {1, 1, 3, 0, /* 0xcb */
1526 {0, 1, 2, 0, /* 0xcc */
1533 {1, 1, 3, 0, /* 0xcd */
1540 {0, 1, 2, 0, /* 0xce */
1547 {1, 1, 2, 0, /* 0xcf */
1554 {0, 1, 2, 0, /* 0xd0 */
1561 {1, 1, 3, 0, /* 0xd1 */
1568 {0, 1, 3, 0, /* 0xd2 */
1575 {1, 1, 3, 0, /* 0xd3 */
1582 {0, 1, 3, 0, /* 0xd4 */
1589 {1, 1, 4, 0, /* 0xd5 */
1596 {0, 1, 3, 0, /* 0xd6 */
1603 {1, 1, 3, 0, /* 0xd7 */
1610 {0, 1, 2, 0, /* 0xd8 */
1617 {1, 1, 3, 0, /* 0xd9 */
1624 {0, 1, 3, 0, /* 0xda */
1631 {1, 1, 3, 0, /* 0xdb */
1638 {0, 1, 2, 0, /* 0xdc */
1645 {1, 1, 3, 0, /* 0xdd */
1652 {0, 1, 2, 0, /* 0xde */
1659 {1, 1, 2, 0, /* 0xdf */
1666 {0, 1, 1, 0, /* 0xe0 */
1673 {1, 1, 2, 0, /* 0xe1 */
1680 {0, 1, 2, 0, /* 0xe2 */
1687 {1, 1, 2, 0, /* 0xe3 */
1694 {0, 1, 2, 0, /* 0xe4 */
1701 {1, 1, 3, 0, /* 0xe5 */
1708 {0, 1, 2, 0, /* 0xe6 */
1715 {1, 1, 2, 0, /* 0xe7 */
1722 {0, 1, 2, 0, /* 0xe8 */
1729 {1, 1, 3, 0, /* 0xe9 */
1736 {0, 1, 3, 0, /* 0xea */
1743 {1, 1, 3, 0, /* 0xeb */
1750 {0, 1, 2, 0, /* 0xec */
1757 {1, 1, 3, 0, /* 0xed */
1764 {0, 1, 2, 0, /* 0xee */
1771 {1, 1, 2, 0, /* 0xef */
1778 {0, 1, 1, 0, /* 0xf0 */
1785 {1, 1, 2, 0, /* 0xf1 */
1792 {0, 1, 2, 0, /* 0xf2 */
1799 {1, 1, 2, 0, /* 0xf3 */
1806 {0, 1, 2, 0, /* 0xf4 */
1813 {1, 1, 3, 0, /* 0xf5 */
1820 {0, 1, 2, 0, /* 0xf6 */
1827 {1, 1, 2, 0, /* 0xf7 */
1834 {0, 1, 1, 0, /* 0xf8 */
1841 {1, 1, 2, 0, /* 0xf9 */
1848 {0, 1, 2, 0, /* 0xfa */
1855 {1, 1, 2, 0, /* 0xfb */
1862 {0, 1, 1, 0, /* 0xfc */
1869 {1, 1, 2, 0, /* 0xfd */
1876 {0, 1, 1, 0, /* 0xfe */
1883 {1, 1, 1, 0, /* 0xff */
1894 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1895 struct sctp_scoping *scope,
1898 if ((scope->loopback_scope == 0) &&
1899 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1901 * skip loopback if not in scope
1905 switch (ifa->address.sa.sa_family) {
1908 if (scope->ipv4_addr_legal) {
1909 struct sockaddr_in *sin;
1911 sin = (struct sockaddr_in *)&ifa->address.sin;
1912 if (sin->sin_addr.s_addr == 0) {
1913 /* not in scope, unspecified */
1916 if ((scope->ipv4_local_scope == 0) &&
1917 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1918 /* private address not in scope */
1928 if (scope->ipv6_addr_legal) {
1929 struct sockaddr_in6 *sin6;
1931 #if !defined(__Panda__)
1932 /* Must update the flags, bummer, which
1933 * means any IFA locks must now be applied HERE <->
1936 sctp_gather_internal_ifa_flags(ifa);
1939 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1942 /* ok to use deprecated addresses? */
1943 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1944 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1945 /* skip unspecified addresses */
1948 if ( /* (local_scope == 0) && */
1949 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1952 if ((scope->site_scope == 0) &&
1953 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1961 #if defined(__Userspace__)
1963 if (!scope->conn_addr_legal) {
1974 static struct mbuf *
1975 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1977 #if defined(INET) || defined(INET6)
1978 struct sctp_paramhdr *parmh;
1983 switch (ifa->address.sa.sa_family) {
1986 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1991 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1997 #if defined(INET) || defined(INET6)
1998 if (M_TRAILINGSPACE(m) >= plen) {
1999 /* easy case: we just drop it on the end */
2000 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
2003 /* Need more space */
2005 while (SCTP_BUF_NEXT(mret) != NULL) {
2006 mret = SCTP_BUF_NEXT(mret);
2008 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2009 if (SCTP_BUF_NEXT(mret) == NULL) {
2010 /* We are hosed, can't add more addresses */
2013 mret = SCTP_BUF_NEXT(mret);
2014 parmh = mtod(mret, struct sctp_paramhdr *);
2016 /* now add the parameter */
2017 switch (ifa->address.sa.sa_family) {
2021 struct sctp_ipv4addr_param *ipv4p;
2022 struct sockaddr_in *sin;
2024 sin = (struct sockaddr_in *)&ifa->address.sin;
2025 ipv4p = (struct sctp_ipv4addr_param *)parmh;
2026 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
2027 parmh->param_length = htons(plen);
2028 ipv4p->addr = sin->sin_addr.s_addr;
2029 SCTP_BUF_LEN(mret) += plen;
2036 struct sctp_ipv6addr_param *ipv6p;
2037 struct sockaddr_in6 *sin6;
2039 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2040 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2041 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2042 parmh->param_length = htons(plen);
2043 memcpy(ipv6p->addr, &sin6->sin6_addr,
2044 sizeof(ipv6p->addr));
2045 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2046 /* clear embedded scope in the address */
2047 in6_clearscope((struct in6_addr *)ipv6p->addr);
2049 SCTP_BUF_LEN(mret) += plen;
2065 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2066 struct sctp_scoping *scope,
2067 struct mbuf *m_at, int cnt_inits_to,
2068 uint16_t *padding_len, uint16_t *chunk_len)
2070 struct sctp_vrf *vrf = NULL;
2071 int cnt, limit_out = 0, total_count;
2074 vrf_id = inp->def_vrf_id;
2075 SCTP_IPI_ADDR_RLOCK();
2076 vrf = sctp_find_vrf(vrf_id);
2078 SCTP_IPI_ADDR_RUNLOCK();
2081 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2082 struct sctp_ifa *sctp_ifap;
2083 struct sctp_ifn *sctp_ifnp;
2086 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2088 cnt = SCTP_ADDRESS_LIMIT;
2091 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2092 if ((scope->loopback_scope == 0) &&
2093 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2095 * Skip loopback devices if loopback_scope
2100 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2101 #if defined(__FreeBSD__)
2103 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2104 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2105 &sctp_ifap->address.sin.sin_addr) != 0)) {
2110 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2111 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2112 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2117 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2120 #if defined(__Userspace__)
2121 if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2125 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2129 if (cnt > SCTP_ADDRESS_LIMIT) {
2133 if (cnt > SCTP_ADDRESS_LIMIT) {
2140 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2142 if ((scope->loopback_scope == 0) &&
2143 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2145 * Skip loopback devices if
2146 * loopback_scope not set
2150 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2151 #if defined(__FreeBSD__)
2153 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2154 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2155 &sctp_ifap->address.sin.sin_addr) != 0)) {
2160 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2161 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2162 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2167 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2170 #if defined(__Userspace__)
2171 if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2175 if (sctp_is_address_in_scope(sctp_ifap,
2179 if ((chunk_len != NULL) &&
2180 (padding_len != NULL) &&
2181 (*padding_len > 0)) {
2182 memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2183 SCTP_BUF_LEN(m_at) += *padding_len;
2184 *chunk_len += *padding_len;
2187 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2192 /* two from each address */
2195 if (total_count > SCTP_ADDRESS_LIMIT) {
2196 /* No more addresses */
2204 struct sctp_laddr *laddr;
2207 /* First, how many ? */
2208 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2209 if (laddr->ifa == NULL) {
2212 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2213 /* Address being deleted by the system, don't
2217 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2218 /* Address being deleted on this ep
2223 #if defined(__Userspace__)
2224 if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2228 if (sctp_is_address_in_scope(laddr->ifa,
2235 * To get through a NAT we only list addresses if we have
2236 * more than one. That way if you just bind a single address
2237 * we let the source of the init dictate our address.
2241 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2242 if (laddr->ifa == NULL) {
2245 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2248 #if defined(__Userspace__)
2249 if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2253 if (sctp_is_address_in_scope(laddr->ifa,
2257 if ((chunk_len != NULL) &&
2258 (padding_len != NULL) &&
2259 (*padding_len > 0)) {
2260 memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2261 SCTP_BUF_LEN(m_at) += *padding_len;
2262 *chunk_len += *padding_len;
2265 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2267 if (cnt >= SCTP_ADDRESS_LIMIT) {
2273 SCTP_IPI_ADDR_RUNLOCK();
2277 static struct sctp_ifa *
2278 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2279 uint8_t dest_is_loop,
2280 uint8_t dest_is_priv,
2283 uint8_t dest_is_global = 0;
2284 /* dest_is_priv is true if destination is a private address */
2285 /* dest_is_loop is true if destination is a loopback address */
2288 * Here we determine if it's a preferred address. A preferred address
2289 * means it is the same scope or higher scope than the destination.
2290 * L = loopback, P = private, G = global
2291 * -----------------------------------------
2292 * src | dest | result
2293 * ----------------------------------------
2295 * -----------------------------------------
2296 * P | L | yes-v4 no-v6
2297 * -----------------------------------------
2298 * G | L | yes-v4 no-v6
2299 * -----------------------------------------
2301 * -----------------------------------------
2303 * -----------------------------------------
2305 * -----------------------------------------
2307 * -----------------------------------------
2309 * -----------------------------------------
2311 * -----------------------------------------
2314 if (ifa->address.sa.sa_family != fam) {
2315 /* forget mis-matched family */
2318 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2321 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2322 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2323 /* Ok the address may be ok */
2325 if (fam == AF_INET6) {
2326 /* ok to use deprecated addresses? no, let's not! */
2327 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2328 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2331 if (ifa->src_is_priv && !ifa->src_is_loop) {
2333 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2337 if (ifa->src_is_glob) {
2339 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2345 /* Now that we know what is what, implement our table;
2346 * this could in theory be done slicker (it used to be), but this
2347 * is straightforward and easier to validate :-)
2349 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2350 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2351 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2352 dest_is_loop, dest_is_priv, dest_is_global);
2354 if ((ifa->src_is_loop) && (dest_is_priv)) {
2355 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2358 if ((ifa->src_is_glob) && (dest_is_priv)) {
2359 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2362 if ((ifa->src_is_loop) && (dest_is_global)) {
2363 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2366 if ((ifa->src_is_priv) && (dest_is_global)) {
2367 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2370 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2371 /* it's a preferred address */
2375 static struct sctp_ifa *
2376 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2377 uint8_t dest_is_loop,
2378 uint8_t dest_is_priv,
2381 uint8_t dest_is_global = 0;
2384 * Here we determine if it's an acceptable address. An acceptable
2385 * address means it is the same scope or higher scope, but we can
2386 * allow for NAT, which means it's OK to have a global dest and a
2389 * L = loopback, P = private, G = global
2390 * -----------------------------------------
2391 * src | dest | result
2392 * -----------------------------------------
2394 * -----------------------------------------
2395 * P | L | yes-v4 no-v6
2396 * -----------------------------------------
2398 * -----------------------------------------
2400 * -----------------------------------------
2402 * -----------------------------------------
2403 * G | P | yes - May not work
2404 * -----------------------------------------
2406 * -----------------------------------------
2407 * P | G | yes - May not work
2408 * -----------------------------------------
2410 * -----------------------------------------
2413 if (ifa->address.sa.sa_family != fam) {
2414 /* forget non matching family */
2415 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2416 ifa->address.sa.sa_family, fam);
2419 /* Ok the address may be ok */
2420 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2421 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2422 dest_is_loop, dest_is_priv);
2423 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2427 if (fam == AF_INET6) {
2428 /* ok to use deprecated addresses? */
2429 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2432 if (ifa->src_is_priv) {
2433 /* Special case, linklocal to loop */
2440 * Now that we know what is what, implement our table.
2441 * This could in theory be done slicker (it used to be), but this
2442 * is straightforward and easier to validate :-)
2444 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2447 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2450 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2453 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2456 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2457 /* it's an acceptable address */
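/*
 * Check whether the given ifa is on the association's restricted-address
 * list.  Without an stcb there are no restrictions, so the address is
 * never considered restricted.
 */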
2462 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2464 struct sctp_laddr *laddr;
2467 /* There are no restrictions, no TCB :-) */
2470 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2471 if (laddr->ifa == NULL) {
2472 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2476 if (laddr->ifa == ifa) {
2477 /* Yes it is on the list */
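/*
 * Check whether the given ifa is bound to the endpoint, i.e. appears on
 * inp->sctp_addr_list with no pending action against it.
 */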
2486 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2488 struct sctp_laddr *laddr;
2492 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2493 if (laddr->ifa == NULL) {
2494 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2498 if ((laddr->ifa == ifa) && laddr->action == 0)
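/*
 * Source address selection for a bound-specific endpoint with no
 * association: first try a preferred, endpoint-bound address on the
 * interface we will emit on, then rotate through the bound addresses
 * looking for a preferred and finally for an acceptable address.
 */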
2507 static struct sctp_ifa *
2508 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2511 int non_asoc_addr_ok,
2512 uint8_t dest_is_priv,
2513 uint8_t dest_is_loop,
2516 struct sctp_laddr *laddr, *starting_point;
2519 struct sctp_ifn *sctp_ifn;
2520 struct sctp_ifa *sctp_ifa, *sifa;
2521 struct sctp_vrf *vrf;
2524 vrf = sctp_find_vrf(vrf_id);
2528 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2529 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2530 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2532 * First question: is the ifn we will emit on in our list? If so, we
2533 * want such an address. Note that we first look for a
2534 * preferred address.
2537 /* is a preferred one on the interface we route out? */
2538 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2539 #if defined(__FreeBSD__)
2541 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2542 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2543 &sctp_ifa->address.sin.sin_addr) != 0)) {
2548 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2549 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2550 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2555 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2556 (non_asoc_addr_ok == 0))
2558 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2563 if (sctp_is_addr_in_ep(inp, sifa)) {
2564 atomic_add_int(&sifa->refcount, 1);
2570 * OK, now we need to find one on the list of addresses.
2571 * We can't get one on the emitting interface, so let's first look for
2572 * a preferred one and, failing that, an acceptable one.
2575 starting_point = inp->next_addr_touse;
2577 if (inp->next_addr_touse == NULL) {
2578 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2581 for (laddr = inp->next_addr_touse; laddr;
2582 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2583 if (laddr->ifa == NULL) {
2584 /* address has been removed */
2587 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2588 /* address is being deleted */
2591 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2595 atomic_add_int(&sifa->refcount, 1);
2598 if (resettotop == 0) {
2599 inp->next_addr_touse = NULL;
2603 inp->next_addr_touse = starting_point;
2606 if (inp->next_addr_touse == NULL) {
2607 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2611 /* ok, what about an acceptable address in the inp */
2612 for (laddr = inp->next_addr_touse; laddr;
2613 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2614 if (laddr->ifa == NULL) {
2615 /* address has been removed */
2618 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2619 /* address is being deleted */
2622 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2626 atomic_add_int(&sifa->refcount, 1);
2629 if (resettotop == 0) {
2630 inp->next_addr_touse = NULL;
2631 goto once_again_too;
2635 * no address bound can be a source for the destination we are in
2643 static struct sctp_ifa *
2644 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2645 struct sctp_tcb *stcb,
2648 uint8_t dest_is_priv,
2649 uint8_t dest_is_loop,
2650 int non_asoc_addr_ok,
2653 struct sctp_laddr *laddr, *starting_point;
2655 struct sctp_ifn *sctp_ifn;
2656 struct sctp_ifa *sctp_ifa, *sifa;
2657 uint8_t start_at_beginning = 0;
2658 struct sctp_vrf *vrf;
2662 * first question, is the ifn we will emit on in our list, if so, we
2665 vrf = sctp_find_vrf(vrf_id);
2669 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2670 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2671 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2674 * first question, is the ifn we will emit on in our list? If so,
2675 * we want that one. First we look for a preferred. Second, we go
2676 * for an acceptable.
2679 /* first try for a preferred address on the ep */
2680 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2681 #if defined(__FreeBSD__)
2683 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2684 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2685 &sctp_ifa->address.sin.sin_addr) != 0)) {
2690 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2691 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2692 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2697 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2699 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2700 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2703 if (((non_asoc_addr_ok == 0) &&
2704 (sctp_is_addr_restricted(stcb, sifa))) ||
2705 (non_asoc_addr_ok &&
2706 (sctp_is_addr_restricted(stcb, sifa)) &&
2707 (!sctp_is_addr_pending(stcb, sifa)))) {
2708 /* on the no-no list */
2711 atomic_add_int(&sifa->refcount, 1);
2715 /* next try for an acceptable address on the ep */
2716 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2717 #if defined(__FreeBSD__)
2719 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2720 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2721 &sctp_ifa->address.sin.sin_addr) != 0)) {
2726 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2727 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2728 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2733 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2735 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2736 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2739 if (((non_asoc_addr_ok == 0) &&
2740 (sctp_is_addr_restricted(stcb, sifa))) ||
2741 (non_asoc_addr_ok &&
2742 (sctp_is_addr_restricted(stcb, sifa)) &&
2743 (!sctp_is_addr_pending(stcb, sifa)))) {
2744 /* on the no-no list */
2747 atomic_add_int(&sifa->refcount, 1);
2754 * if we can't find one like that then we must look at all
2755 * bound addresses and pick one: first a preferred one, then
2756 * an acceptable one.
2758 starting_point = stcb->asoc.last_used_address;
2760 if (stcb->asoc.last_used_address == NULL) {
2761 start_at_beginning = 1;
2762 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2764 /* search beginning with the last used address */
2765 for (laddr = stcb->asoc.last_used_address; laddr;
2766 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2767 if (laddr->ifa == NULL) {
2768 /* address has been removed */
2771 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2772 /* address is being deleted */
2775 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2778 if (((non_asoc_addr_ok == 0) &&
2779 (sctp_is_addr_restricted(stcb, sifa))) ||
2780 (non_asoc_addr_ok &&
2781 (sctp_is_addr_restricted(stcb, sifa)) &&
2782 (!sctp_is_addr_pending(stcb, sifa)))) {
2783 /* on the no-no list */
2786 stcb->asoc.last_used_address = laddr;
2787 atomic_add_int(&sifa->refcount, 1);
2790 if (start_at_beginning == 0) {
2791 stcb->asoc.last_used_address = NULL;
2792 goto sctp_from_the_top;
2794 /* now try for any higher scope than the destination */
2795 stcb->asoc.last_used_address = starting_point;
2796 start_at_beginning = 0;
2798 if (stcb->asoc.last_used_address == NULL) {
2799 start_at_beginning = 1;
2800 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2802 /* search beginning with the last used address */
2803 for (laddr = stcb->asoc.last_used_address; laddr;
2804 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2805 if (laddr->ifa == NULL) {
2806 /* address has been removed */
2809 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2810 /* address is being deleted */
2813 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2817 if (((non_asoc_addr_ok == 0) &&
2818 (sctp_is_addr_restricted(stcb, sifa))) ||
2819 (non_asoc_addr_ok &&
2820 (sctp_is_addr_restricted(stcb, sifa)) &&
2821 (!sctp_is_addr_pending(stcb, sifa)))) {
2822 /* on the no-no list */
2825 stcb->asoc.last_used_address = laddr;
2826 atomic_add_int(&sifa->refcount, 1);
2829 if (start_at_beginning == 0) {
2830 stcb->asoc.last_used_address = NULL;
2831 goto sctp_from_the_top2;
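/*
 * Walk the interface's address list and return the addr_wanted-th address
 * that is a preferred source for this destination and passes the jail,
 * scope and restriction checks; NULL if there is no such address.
 */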
2836 static struct sctp_ifa *
2837 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2838 #if defined(__FreeBSD__)
2839 struct sctp_inpcb *inp,
2841 struct sctp_inpcb *inp SCTP_UNUSED,
2843 struct sctp_tcb *stcb,
2844 int non_asoc_addr_ok,
2845 uint8_t dest_is_loop,
2846 uint8_t dest_is_priv,
2852 struct sctp_ifa *ifa, *sifa;
2853 int num_eligible_addr = 0;
2855 #ifdef SCTP_EMBEDDED_V6_SCOPE
2856 struct sockaddr_in6 sin6, lsa6;
2858 if (fam == AF_INET6) {
2859 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2861 (void)sa6_recoverscope(&sin6);
2863 (void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2864 #endif /* SCTP_KAME */
2866 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2868 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2869 #if defined(__FreeBSD__)
2871 if ((ifa->address.sa.sa_family == AF_INET) &&
2872 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2873 &ifa->address.sin.sin_addr) != 0)) {
2878 if ((ifa->address.sa.sa_family == AF_INET6) &&
2879 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2880 &ifa->address.sin6.sin6_addr) != 0)) {
2885 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2886 (non_asoc_addr_ok == 0))
2888 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2893 if (fam == AF_INET6 &&
2895 sifa->src_is_loop && sifa->src_is_priv) {
2896 /* don't allow fe80::1 to be a src on loop ::1; we don't list it
2897 * to the peer, so we would get an abort.
2901 #ifdef SCTP_EMBEDDED_V6_SCOPE
2902 if (fam == AF_INET6 &&
2903 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2904 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2905 /* link-local <-> link-local must belong to the same scope. */
2906 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2908 (void)sa6_recoverscope(&lsa6);
2910 (void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2911 #endif /* SCTP_KAME */
2912 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2916 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2919 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2920 /* Check if the IPv6 address matches the next-hop.
2921 In the mobile case, an old IPv6 address may not be deleted
2922 from the interface. Then the interface has both the previous and
2923 the new addresses. We should use the one corresponding to the
2924 next-hop. (by micchie)
2927 if (stcb && fam == AF_INET6 &&
2928 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2929 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2936 /* Avoid topologically incorrect IPv4 address */
2937 if (stcb && fam == AF_INET &&
2938 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2939 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2946 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2949 if (((non_asoc_addr_ok == 0) &&
2950 (sctp_is_addr_restricted(stcb, sifa))) ||
2951 (non_asoc_addr_ok &&
2952 (sctp_is_addr_restricted(stcb, sifa)) &&
2953 (!sctp_is_addr_pending(stcb, sifa)))) {
2955 * It is restricted for some reason..
2956 * probably not yet added.
2961 if (num_eligible_addr >= addr_wanted) {
2964 num_eligible_addr++;
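/*
 * Count how many addresses on the interface would be preferred sources
 * for this destination, applying the same jail, scope and restriction
 * checks as the selection routine above.
 */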
2971 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2972 #if defined(__FreeBSD__)
2973 struct sctp_inpcb *inp,
2975 struct sctp_inpcb *inp SCTP_UNUSED,
2977 struct sctp_tcb *stcb,
2978 int non_asoc_addr_ok,
2979 uint8_t dest_is_loop,
2980 uint8_t dest_is_priv,
2983 struct sctp_ifa *ifa, *sifa;
2984 int num_eligible_addr = 0;
2986 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2987 #if defined(__FreeBSD__)
2989 if ((ifa->address.sa.sa_family == AF_INET) &&
2990 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2991 &ifa->address.sin.sin_addr) != 0)) {
2996 if ((ifa->address.sa.sa_family == AF_INET6) &&
2998 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2999 &ifa->address.sin6.sin6_addr) != 0)) {
3004 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3005 (non_asoc_addr_ok == 0)) {
3008 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3014 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3017 if (((non_asoc_addr_ok == 0) &&
3018 (sctp_is_addr_restricted(stcb, sifa))) ||
3019 (non_asoc_addr_ok &&
3020 (sctp_is_addr_restricted(stcb, sifa)) &&
3021 (!sctp_is_addr_pending(stcb, sifa)))) {
3023 * It is restricted for some reason..
3024 * probably not yet added.
3029 num_eligible_addr++;
3031 return (num_eligible_addr);
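/*
 * Source address selection for a bound-all endpoint.  Plan A: a preferred
 * address on the interface the route emits on.  Plan B: a preferred
 * address on any other interface.  Plan C: an acceptable address on the
 * emit interface.  Plan D: an acceptable address on any interface.  If
 * nothing is found, plans C and D are retried once with IPv4 private
 * (local-scope) addresses allowed.
 */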
3034 static struct sctp_ifa *
3035 sctp_choose_boundall(struct sctp_inpcb *inp,
3036 struct sctp_tcb *stcb,
3037 struct sctp_nets *net,
3040 uint8_t dest_is_priv,
3041 uint8_t dest_is_loop,
3042 int non_asoc_addr_ok,
3045 int cur_addr_num = 0, num_preferred = 0;
3047 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3048 struct sctp_ifa *sctp_ifa, *sifa;
3050 struct sctp_vrf *vrf;
3056 * For boundall we can use any address in the association.
3057 * If non_asoc_addr_ok is set we can use any address (at least in
3058 * theory). So we look for preferred addresses first. If we find one,
3059 * we use it. Otherwise we next try to get an address on the
3060 * interface, which we should be able to do (unless non_asoc_addr_ok
3061 * is false and we are routed out that way). In these cases where we
3062 * can't use the address of the interface we go through all the
3063 * ifn's looking for an address we can use and fill that in. Punting
3064 * means we send back address 0, which will probably cause problems
3065 * actually since then IP will fill in the address of the route ifn,
3066 * which means we probably already rejected it.. i.e. here comes an
3069 vrf = sctp_find_vrf(vrf_id);
3073 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3074 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3075 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3076 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3077 if (sctp_ifn == NULL) {
3078 /* ?? We don't have this guy ?? */
3079 SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3080 goto bound_all_plan_b;
3082 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3083 ifn_index, sctp_ifn->ifn_name);
3086 cur_addr_num = net->indx_of_eligible_next_to_use;
3088 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3093 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3094 num_preferred, sctp_ifn->ifn_name);
3095 if (num_preferred == 0) {
3097 * no eligible addresses, we must use some other interface
3098 * address if we can find one.
3100 goto bound_all_plan_b;
3103 * OK, we have num_eligible_addr set with how many we can use; this
3104 * may vary from call to call due to addresses being deprecated
3107 if (cur_addr_num >= num_preferred) {
3111 * select the nth address from the list (where cur_addr_num is the
3112 * nth) and 0 is the first one, 1 is the second one etc...
3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3116 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3117 dest_is_priv, cur_addr_num, fam, ro);
3119 /* if sctp_ifa is NULL, something changed; fall to plan_b. */
3121 atomic_add_int(&sctp_ifa->refcount, 1);
3123 /* save off the index of the next one we will want */
3124 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3129 * plan_b: Look at all interfaces and find a preferred address. If
3130 * no preferred address is found, fall through to plan_c.
3133 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3134 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3135 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3136 sctp_ifn->ifn_name);
3137 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3138 /* wrong base scope */
3139 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3142 if ((sctp_ifn == looked_at) && looked_at) {
3143 /* already looked at this guy */
3144 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3147 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3148 dest_is_loop, dest_is_priv, fam);
3149 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3150 "Found ifn:%p %d preferred source addresses\n",
3151 ifn, num_preferred);
3152 if (num_preferred == 0) {
3153 /* None on this interface. */
3154 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3157 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3158 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3159 num_preferred, (void *)sctp_ifn, cur_addr_num);
3162 * OK, we have num_eligible_addr set with how many we can
3163 * use; this may vary from call to call due to addresses
3164 * being deprecated etc.
3166 if (cur_addr_num >= num_preferred) {
3169 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3170 dest_is_priv, cur_addr_num, fam, ro);
3174 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3175 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3177 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3178 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3179 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3180 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3182 atomic_add_int(&sifa->refcount, 1);
3186 again_with_private_addresses_allowed:
3188 /* plan_c: do we have an acceptable address on the emit interface */
3190 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3191 if (emit_ifn == NULL) {
3192 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3195 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3196 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3197 #if defined(__FreeBSD__)
3199 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3200 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3201 &sctp_ifa->address.sin.sin_addr) != 0)) {
3202 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3207 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3208 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3209 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3210 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3215 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3216 (non_asoc_addr_ok == 0)) {
3217 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3220 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3223 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3227 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3228 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3232 if (((non_asoc_addr_ok == 0) &&
3233 (sctp_is_addr_restricted(stcb, sifa))) ||
3234 (non_asoc_addr_ok &&
3235 (sctp_is_addr_restricted(stcb, sifa)) &&
3236 (!sctp_is_addr_pending(stcb, sifa)))) {
3238 * It is restricted for some
3239 * reason.. probably not yet added.
3241 SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
3246 SCTP_PRINTF("Stcb is null - no print\n");
3248 atomic_add_int(&sifa->refcount, 1);
3253 * plan_d: We are in trouble. No preferred address on the emit
3254 * interface. And no preferred address on any other interface either.
3255 * Go out and see if we can find an acceptable address somewhere
3256 * amongst all interfaces.
3258 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3259 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3260 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3261 /* wrong base scope */
3264 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3265 #if defined(__FreeBSD__)
3267 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3268 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3269 &sctp_ifa->address.sin.sin_addr) != 0)) {
3274 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3275 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3276 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3281 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3282 (non_asoc_addr_ok == 0))
3284 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3290 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3294 if (((non_asoc_addr_ok == 0) &&
3295 (sctp_is_addr_restricted(stcb, sifa))) ||
3296 (non_asoc_addr_ok &&
3297 (sctp_is_addr_restricted(stcb, sifa)) &&
3298 (!sctp_is_addr_pending(stcb, sifa)))) {
3300 * It is restricted for some
3301 * reason.. probably not yet added.
3311 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3312 stcb->asoc.scope.ipv4_local_scope = 1;
3314 goto again_with_private_addresses_allowed;
3315 } else if (retried == 1) {
3316 stcb->asoc.scope.ipv4_local_scope = 0;
3323 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3324 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3325 /* wrong base scope */
3328 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3329 struct sctp_ifa *tmp_sifa;
3331 #if defined(__FreeBSD__)
3333 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3334 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3335 &sctp_ifa->address.sin.sin_addr) != 0)) {
3340 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3341 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3342 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3347 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3348 (non_asoc_addr_ok == 0))
3350 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3353 if (tmp_sifa == NULL) {
3356 if (tmp_sifa == sifa) {
3360 if (sctp_is_address_in_scope(tmp_sifa,
3361 &stcb->asoc.scope, 0) == 0) {
3364 if (((non_asoc_addr_ok == 0) &&
3365 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3366 (non_asoc_addr_ok &&
3367 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3368 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3370 * It is restricted for some
3371 * reason.. probably not yet added.
3376 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3377 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3378 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3383 atomic_add_int(&sifa->refcount, 1);
3391 /* tcb may be NULL */
3393 sctp_source_address_selection(struct sctp_inpcb *inp,
3394 struct sctp_tcb *stcb,
3396 struct sctp_nets *net,
3397 int non_asoc_addr_ok, uint32_t vrf_id)
3399 struct sctp_ifa *answer;
3400 uint8_t dest_is_priv, dest_is_loop;
3403 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3406 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3410 * Rules: - Find the route if needed, cache if I can. - Look at
3411 * the interface address in the route: is it in the bound list? If so, we
3412 * have the best source. - If not, we must rotate amongst the
3417 * Do we need to pay attention to scope? We can have a private address
3418 * or a global address we are sourcing or sending to. So if we draw
3420 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3422 * ------------------------------------------
3423 * source * dest * result
3424 * -----------------------------------------
3425 * <a> Private * Global * NAT
3426 * -----------------------------------------
3427 * <b> Private * Private * No problem
3428 * -----------------------------------------
3429 * <c> Global * Private * Huh, How will this work?
3430 * -----------------------------------------
3431 * <d> Global * Global * No Problem
3432 *------------------------------------------
3433 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3435 *------------------------------------------
3436 * source * dest * result
3437 * -----------------------------------------
3438 * <a> Linklocal * Global *
3439 * -----------------------------------------
3440 * <b> Linklocal * Linklocal * No problem
3441 * -----------------------------------------
3442 * <c> Global * Linklocal * Huh, How will this work?
3443 * -----------------------------------------
3444 * <d> Global * Global * No Problem
3445 *------------------------------------------
3446 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3448 * And then we add to that what happens if there are multiple addresses
3449 * assigned to an interface. Remember the ifa on an ifn is a linked
3450 * list of addresses. So one interface can have more than one IP
3451 * address. What happens if we have both a private and a global
3452 * address? Do we then use the context of the destination to sort out which
3453 * one is best? And what about NATs? Sending P->G may get you a NAT
3454 * translation, or should you select the G that's on the interface in
3459 * - count the number of addresses on the interface.
3460 * - if it is one, no problem except case <c>.
3461 * For <a> we will assume a NAT out there.
3462 * - if there are more than one, then we need to worry about scope P
3463 * or G. We should prefer G -> G and P -> P if possible.
3464 * Then as a secondary fall back to mixed types G->P being a last
3466 * - The above all works for bound all, but for bound-specific we need to
3467 * use the same concept but instead only consider the bound
3468 * addresses. If the bound set is NOT assigned to the interface then
3469 * we must use rotation amongst the bound addresses.
3471 if (ro->ro_rt == NULL) {
3473 * Need a route to cache.
3475 SCTP_RTALLOC(ro, vrf_id);
3477 if (ro->ro_rt == NULL) {
3480 fam = ro->ro_dst.sa_family;
3481 dest_is_priv = dest_is_loop = 0;
3482 /* Set up our scopes for the destination */
3486 /* Scope based on outbound address */
3487 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3490 /* mark it as local */
3491 net->addr_is_local = 1;
3493 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3500 /* Scope based on outbound address */
3501 #if defined(__Userspace_os_Windows)
3502 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3504 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3505 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3508 * If the address is a loopback address, which
3509 * consists of "::1" OR "fe80::1%lo0", we are in loopback
3510 * scope. But we don't use dest_is_priv (link local
3515 /* mark it as local */
3516 net->addr_is_local = 1;
3518 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3524 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3525 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3526 SCTP_IPI_ADDR_RLOCK();
3527 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3531 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3532 dest_is_priv, dest_is_loop,
3533 non_asoc_addr_ok, fam);
3534 SCTP_IPI_ADDR_RUNLOCK();
3541 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3542 vrf_id, dest_is_priv,
3544 non_asoc_addr_ok, fam);
3546 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3551 SCTP_IPI_ADDR_RUNLOCK();
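/*
 * Search the control mbuf chain for a cmsg of the given type at the
 * IPPROTO_SCTP level and copy its data out.  A request for SCTP_SNDRCV is
 * also satisfied by synthesizing an sctp_sndrcvinfo from SCTP_SNDINFO,
 * SCTP_PRINFO and SCTP_AUTHINFO cmsgs.
 */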
3556 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3558 #if defined(__Userspace_os_Windows)
3563 int tlen, at, found;
3564 struct sctp_sndinfo sndinfo;
3565 struct sctp_prinfo prinfo;
3566 struct sctp_authinfo authinfo;
3568 tlen = SCTP_BUF_LEN(control);
3572 * Independent of how many mbufs, find the c_type inside the control
3573 * structure and copy out the data.
3576 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3577 /* There is not enough room for one more. */
3580 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3581 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3582 /* We don't have a complete CMSG header. */
3585 if (((int)cmh.cmsg_len + at) > tlen) {
3586 /* We don't have the complete CMSG. */
3589 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3590 ((c_type == cmh.cmsg_type) ||
3591 ((c_type == SCTP_SNDRCV) &&
3592 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3593 (cmh.cmsg_type == SCTP_PRINFO) ||
3594 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3595 if (c_type == cmh.cmsg_type) {
3596 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3599 /* It is exactly what we want. Copy it out. */
3600 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
3603 struct sctp_sndrcvinfo *sndrcvinfo;
3605 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3607 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3610 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3612 switch (cmh.cmsg_type) {
3614 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3617 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3618 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3619 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3620 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3621 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3622 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3625 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3628 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3629 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3630 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3632 sndrcvinfo->sinfo_timetolive = 0;
3634 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3637 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3640 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3641 sndrcvinfo->sinfo_keynumber_valid = 1;
3642 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3650 at += CMSG_ALIGN(cmh.cmsg_len);
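/*
 * Apply cmsgs supplied with an implicit association setup: SCTP_INIT
 * adjusts the stream counts and timers of the new association, while
 * SCTP_DSTADDRV4/SCTP_DSTADDRV6 add remote addresses.  On failure *error
 * is set for the caller.
 */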
3656 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3658 #if defined(__Userspace_os_Windows)
3664 struct sctp_initmsg initmsg;
3666 struct sockaddr_in sin;
3669 struct sockaddr_in6 sin6;
3672 tlen = SCTP_BUF_LEN(control);
3675 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3676 /* There is not enough room for one more. */
3680 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3681 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3682 /* We don't have a complete CMSG header. */
3686 if (((int)cmh.cmsg_len + at) > tlen) {
3687 /* We don't have the complete CMSG. */
3691 if (cmh.cmsg_level == IPPROTO_SCTP) {
3692 switch (cmh.cmsg_type) {
3694 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3698 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3699 if (initmsg.sinit_max_attempts)
3700 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3701 if (initmsg.sinit_num_ostreams)
3702 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3703 if (initmsg.sinit_max_instreams)
3704 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3705 if (initmsg.sinit_max_init_timeo)
3706 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3707 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3708 struct sctp_stream_out *tmp_str;
3711 /* Default is NOT correct */
3712 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3713 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3714 SCTP_TCB_UNLOCK(stcb);
3715 SCTP_MALLOC(tmp_str,
3716 struct sctp_stream_out *,
3717 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3719 SCTP_TCB_LOCK(stcb);
3720 if (tmp_str != NULL) {
3721 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3722 stcb->asoc.strmout = tmp_str;
3723 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3725 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3727 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3728 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3729 stcb->asoc.strmout[i].chunks_on_queues = 0;
3730 stcb->asoc.strmout[i].next_sequence_send = 0;
3731 stcb->asoc.strmout[i].stream_no = i;
3732 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3733 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3738 case SCTP_DSTADDRV4:
3739 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3743 memset(&sin, 0, sizeof(struct sockaddr_in));
3744 sin.sin_family = AF_INET;
3746 sin.sin_len = sizeof(struct sockaddr_in);
3748 sin.sin_port = stcb->rport;
3749 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3750 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3751 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3752 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3756 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3757 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3764 case SCTP_DSTADDRV6:
3765 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3769 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3770 sin6.sin6_family = AF_INET6;
3771 #ifdef HAVE_SIN6_LEN
3772 sin6.sin6_len = sizeof(struct sockaddr_in6);
3774 sin6.sin6_port = stcb->rport;
3775 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3776 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3777 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3782 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3783 in6_sin6_2_sin(&sin, &sin6);
3784 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3785 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3786 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3790 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3791 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3797 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3798 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3808 at += CMSG_ALIGN(cmh.cmsg_len);
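/*
 * Look up an existing association using the destination addresses carried
 * in SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs; the first address that maps to
 * a known association wins.
 */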
3813 static struct sctp_tcb *
3814 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3816 struct mbuf *control,
3817 struct sctp_nets **net_p,
3820 #if defined(__Userspace_os_Windows)
3826 struct sctp_tcb *stcb;
3827 struct sockaddr *addr;
3829 struct sockaddr_in sin;
3832 struct sockaddr_in6 sin6;
3835 tlen = SCTP_BUF_LEN(control);
3838 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3839 /* There is not enough room for one more. */
3843 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3844 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3845 /* We don't have a complete CMSG header. */
3849 if (((int)cmh.cmsg_len + at) > tlen) {
3850 /* We don't have the complete CMSG. */
3854 if (cmh.cmsg_level == IPPROTO_SCTP) {
3855 switch (cmh.cmsg_type) {
3857 case SCTP_DSTADDRV4:
3858 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3862 memset(&sin, 0, sizeof(struct sockaddr_in));
3863 sin.sin_family = AF_INET;
3865 sin.sin_len = sizeof(struct sockaddr_in);
3867 sin.sin_port = port;
3868 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3869 addr = (struct sockaddr *)&sin;
3873 case SCTP_DSTADDRV6:
3874 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3878 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3879 sin6.sin6_family = AF_INET6;
3880 #ifdef HAVE_SIN6_LEN
3881 sin6.sin6_len = sizeof(struct sockaddr_in6);
3883 sin6.sin6_port = port;
3884 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3886 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3887 in6_sin6_2_sin(&sin, &sin6);
3888 addr = (struct sockaddr *)&sin;
3891 addr = (struct sockaddr *)&sin6;
3899 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3905 at += CMSG_ALIGN(cmh.cmsg_len);
3910 static struct mbuf *
3911 sctp_add_cookie(struct mbuf *init, int init_offset,
3912 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3914 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3915 struct sctp_state_cookie *stc;
3916 struct sctp_paramhdr *ph;
3921 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3922 sizeof(struct sctp_paramhdr)), 0,
3923 M_NOWAIT, 1, MT_DATA);
3927 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3928 if (copy_init == NULL) {
3932 #ifdef SCTP_MBUF_LOGGING
3933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3936 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
3937 if (SCTP_BUF_IS_EXTENDED(mat)) {
3938 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3943 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3945 if (copy_initack == NULL) {
3947 sctp_m_freem(copy_init);
3950 #ifdef SCTP_MBUF_LOGGING
3951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3954 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
3955 if (SCTP_BUF_IS_EXTENDED(mat)) {
3956 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3961 /* easy part: we just drop it on the end */
3962 ph = mtod(mret, struct sctp_paramhdr *);
3963 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3964 sizeof(struct sctp_paramhdr);
3965 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3966 sizeof(struct sctp_paramhdr));
3967 ph->param_type = htons(SCTP_STATE_COOKIE);
3968 ph->param_length = 0; /* fill in at the end */
3969 /* Fill in the stc cookie data */
3970 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3972 /* tack the INIT and then the INIT-ACK onto the chain */
3974 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3975 cookie_sz += SCTP_BUF_LEN(m_at);
3976 if (SCTP_BUF_NEXT(m_at) == NULL) {
3977 SCTP_BUF_NEXT(m_at) = copy_init;
3981 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3982 cookie_sz += SCTP_BUF_LEN(m_at);
3983 if (SCTP_BUF_NEXT(m_at) == NULL) {
3984 SCTP_BUF_NEXT(m_at) = copy_initack;
3988 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3989 cookie_sz += SCTP_BUF_LEN(m_at);
3990 if (SCTP_BUF_NEXT(m_at) == NULL) {
3994 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3996 /* no space, so free the entire chain */
4000 SCTP_BUF_LEN(sig) = 0;
4001 SCTP_BUF_NEXT(m_at) = sig;
4003 foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
4004 memset(foo, 0, SCTP_SIGNATURE_SIZE);
4006 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
4007 cookie_sz += SCTP_SIGNATURE_SIZE;
4008 ph->param_length = htons(cookie_sz);
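/*
 * At this point the cookie mbuf chain is complete: a parameter header
 * of type SCTP_STATE_COOKIE, the state cookie body, copies of the
 * received INIT and of our INIT-ACK, and a zeroed signature area.
 * The HMAC over the cookie is presumably computed later by the caller
 * through the returned signature pointer.
 */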
4014 sctp_get_ect(struct sctp_tcb *stcb)
4016 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
4017 return (SCTP_ECT0_BIT);
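/*
 * Callers OR this value into the IPv4 TOS byte or the IPv6 traffic
 * class, so an association that negotiated ECN marks its packets as
 * ECT(0); otherwise no ECN codepoint is set.
 */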
4023 #if defined(INET) || defined(INET6)
4025 sctp_handle_no_route(struct sctp_tcb *stcb,
4026 struct sctp_nets *net,
4029 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4032 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4033 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4034 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4035 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4036 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4037 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4041 net->dest_state &= ~SCTP_ADDR_REACHABLE;
4042 net->dest_state &= ~SCTP_ADDR_PF;
4046 if (net == stcb->asoc.primary_destination) {
4047 /* need a new primary */
4048 struct sctp_nets *alt;
4050 alt = sctp_find_alternate_net(stcb, net, 0);
4052 if (stcb->asoc.alternate) {
4053 sctp_free_remote_addr(stcb->asoc.alternate);
4055 stcb->asoc.alternate = alt;
4056 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4057 if (net->ro._s_addr) {
4058 sctp_free_ifa(net->ro._s_addr);
4059 net->ro._s_addr = NULL;
4061 net->src_addr_selected = 0;
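/*
 * Clearing src_addr_selected here forces a fresh source-address
 * selection (and route lookup) the next time a chunk is sent to
 * this destination.
 */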
4070 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4071 struct sctp_tcb *stcb, /* may be NULL */
4072 struct sctp_nets *net,
4073 struct sockaddr *to,
4075 uint32_t auth_offset,
4076 struct sctp_auth_chunk *auth,
4077 uint16_t auth_keyid,
4078 int nofragment_flag,
4085 union sctp_sockstore *over_addr,
4086 #if defined(__FreeBSD__)
4087 uint8_t use_mflowid, uint32_t mflowid,
4089 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4090 int so_locked SCTP_UNUSED
4095 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4098 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4099 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4100 * - fill in the HMAC digest of any AUTH chunk in the packet.
4101 * - calculate and fill in the SCTP checksum.
4102 * - prepend an IP address header.
4103 * - if boundall use INADDR_ANY.
4104 * - if boundspecific do source address selection.
4105 * - set fragmentation option for IPv4.
4106 * - On return from IP output, check/adjust mtu size of output
4107 * interface and smallest_mtu size as well.
4109 /* Will need ifdefs around this */
4111 pakhandle_type o_pak;
4114 struct sctphdr *sctphdr;
4117 #if defined(INET) || defined(INET6)
4120 #if defined(INET) || defined(INET6)
4121 #if !defined(__Panda__)
4124 sctp_route_t *ro = NULL;
4125 struct udphdr *udp = NULL;
4128 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4129 struct socket *so = NULL;
4132 #if defined(__APPLE__)
4134 sctp_lock_assert(SCTP_INP_SO(inp));
4135 SCTP_TCB_LOCK_ASSERT(stcb);
4137 sctp_unlock_assert(SCTP_INP_SO(inp));
4140 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4141 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4145 #if defined(INET) || defined(INET6)
4147 vrf_id = stcb->asoc.vrf_id;
4149 vrf_id = inp->def_vrf_id;
4152 /* fill in the HMAC digest for any AUTH chunk in the packet */
4153 if ((auth != NULL) && (stcb != NULL)) {
4154 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4158 tos_value = net->dscp;
4160 tos_value = stcb->asoc.default_dscp;
4162 tos_value = inp->sctp_ep.default_dscp;
4165 switch (to->sa_family) {
4169 struct ip *ip = NULL;
4170 sctp_route_t iproute;
4173 len = sizeof(struct ip) + sizeof(struct sctphdr);
4175 len += sizeof(struct udphdr);
4177 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4180 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4183 SCTP_ALIGN_TO_END(newm, len);
4184 SCTP_BUF_LEN(newm) = len;
4185 SCTP_BUF_NEXT(newm) = m;
4187 #if defined(__FreeBSD__)
4190 if (net->flowidset == 0) {
4191 panic("Flow ID not set");
4194 m->m_pkthdr.flowid = net->flowid;
4195 m->m_flags |= M_FLOWID;
4197 if (use_mflowid != 0) {
4198 m->m_pkthdr.flowid = mflowid;
4199 m->m_flags |= M_FLOWID;
4203 packet_length = sctp_calculate_len(m);
4204 ip = mtod(m, struct ip *);
4205 ip->ip_v = IPVERSION;
4206 ip->ip_hl = (sizeof(struct ip) >> 2);
4207 if (tos_value == 0) {
4209 * This means, in particular, that it is not set at the
4210 * SCTP layer. So use the value from the IP layer.
4212 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4213 tos_value = inp->ip_inp.inp.inp_ip_tos;
4215 tos_value = inp->inp_ip_tos;
4220 tos_value |= sctp_get_ect(stcb);
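/*
 * tos_value now holds the DSCP taken from the net, association or
 * endpoint default (falling back to the inp's ip_tos) with the ECN
 * codepoint OR'ed into the low two bits of the TOS byte.
 */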
4222 if ((nofragment_flag) && (port == 0)) {
4223 #if defined(__FreeBSD__)
4224 #if __FreeBSD_version >= 1000000
4225 ip->ip_off = htons(IP_DF);
4229 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__)
4232 ip->ip_off = htons(IP_DF);
4235 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4236 ip->ip_off = htons(0);
4241 #if defined(__FreeBSD__)
4242 /* FreeBSD has a function for ip_id's */
4243 ip->ip_id = ip_newid();
4244 #elif defined(RANDOM_IP_ID)
4245 /* Apple has RANDOM_IP_ID switch */
4246 ip->ip_id = htons(ip_randomid());
4247 #elif defined(__Userspace__)
4248 ip->ip_id = htons(SCTP_IP_ID(inp)++);
4250 ip->ip_id = SCTP_IP_ID(inp)++;
4253 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4254 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4256 ip->ip_ttl = inp->inp_ip_ttl;
4258 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4259 ip->ip_len = htons(packet_length);
4261 ip->ip_len = packet_length;
4263 ip->ip_tos = tos_value;
4265 ip->ip_p = IPPROTO_UDP;
4267 ip->ip_p = IPPROTO_SCTP;
4272 memset(&iproute, 0, sizeof(iproute));
4274 memcpy(&ro->ro_dst, to, to->sa_len);
4276 memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4279 ro = (sctp_route_t *)&net->ro;
4281 /* Now the address selection part */
4282 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4284 /* call the routine to select the src address */
4285 if (net && out_of_asoc_ok == 0) {
4286 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4287 sctp_free_ifa(net->ro._s_addr);
4288 net->ro._s_addr = NULL;
4289 net->src_addr_selected = 0;
4295 if (net->src_addr_selected == 0) {
4296 /* Cache the source address */
4297 net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4300 net->src_addr_selected = 1;
4302 if (net->ro._s_addr == NULL) {
4303 /* No route to host */
4304 net->src_addr_selected = 0;
4305 sctp_handle_no_route(stcb, net, so_locked);
4306 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4308 return (EHOSTUNREACH);
4310 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4312 if (over_addr == NULL) {
4313 struct sctp_ifa *_lsrc;
4315 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4319 if (_lsrc == NULL) {
4320 sctp_handle_no_route(stcb, net, so_locked);
4321 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4323 return (EHOSTUNREACH);
4325 ip->ip_src = _lsrc->address.sin.sin_addr;
4326 sctp_free_ifa(_lsrc);
4328 ip->ip_src = over_addr->sin.sin_addr;
4329 SCTP_RTALLOC(ro, vrf_id);
4333 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4334 sctp_handle_no_route(stcb, net, so_locked);
4335 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4337 return (EHOSTUNREACH);
4339 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4340 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4341 udp->uh_dport = port;
4342 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
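/*
 * UDP encapsulation (RFC 6951 style): when a remote UDP port is
 * given, the SCTP packet is wrapped in a UDP header whose source
 * port is the sysctl-configured tunneling port and whose length
 * covers everything after the IP header.
 */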
4343 #if !defined(__Windows__) && !defined(__Userspace__)
4344 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4346 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4351 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4356 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4358 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4361 sctphdr->src_port = src_port;
4362 sctphdr->dest_port = dest_port;
4363 sctphdr->v_tag = v_tag;
4364 sctphdr->checksum = 0;
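/*
 * The checksum field stays zero for now; below it is either computed
 * in software with sctp_calculate_cksum() (CRC32c) or left to the NIC
 * by setting CSUM_SCTP in the mbuf's csum_flags, unless the build or
 * the loopback shortcut disables checksumming altogether.
 */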
4367 * If source address selection fails and we find no route
4368 * then the ip_output should fail as well with a
4369 * NO_ROUTE_TO_HOST type error. We probably should catch
4370 * that somewhere and abort the association right away
4371 * (assuming this is an INIT being sent).
4373 if (ro->ro_rt == NULL) {
4375 * src addr selection failed to find a route (or
4376 * valid source addr), so we can't get there from
4379 sctp_handle_no_route(stcb, net, so_locked);
4380 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4382 return (EHOSTUNREACH);
4384 if (ro != &iproute) {
4385 memcpy(&iproute, ro, sizeof(*ro));
4387 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4388 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4389 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4390 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4391 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4394 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4395 /* failed to prepend data, give up */
4396 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4400 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4402 #if defined(SCTP_WITH_NO_CSUM)
4403 SCTP_STAT_INCR(sctps_sendnocrc);
4405 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4406 SCTP_STAT_INCR(sctps_sendswcrc);
4408 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4410 SCTP_ENABLE_UDP_CSUM(o_pak);
4413 SCTP_ENABLE_UDP_CSUM(o_pak);
4416 #if defined(SCTP_WITH_NO_CSUM)
4417 SCTP_STAT_INCR(sctps_sendnocrc);
4419 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4420 m->m_pkthdr.csum_flags = CSUM_SCTP;
4421 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4422 SCTP_STAT_INCR(sctps_sendhwcrc);
4424 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4425 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4426 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4427 SCTP_STAT_INCR(sctps_sendswcrc);
4429 SCTP_STAT_INCR(sctps_sendnocrc);
4434 #ifdef SCTP_PACKET_LOGGING
4435 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4436 sctp_packet_log(o_pak);
4438 /* send it out. table id is taken from stcb */
4439 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4440 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4441 so = SCTP_INP_SO(inp);
4442 SCTP_SOCKET_UNLOCK(so, 0);
4445 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4446 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4447 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4448 atomic_add_int(&stcb->asoc.refcnt, 1);
4449 SCTP_TCB_UNLOCK(stcb);
4450 SCTP_SOCKET_LOCK(so, 0);
4451 SCTP_TCB_LOCK(stcb);
4452 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4455 SCTP_STAT_INCR(sctps_sendpackets);
4456 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4458 SCTP_STAT_INCR(sctps_senderrors);
4460 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4462 /* free temporary routes */
4463 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
4472 /* PMTU check versus smallest asoc MTU goes here */
4473 if ((ro->ro_rt != NULL) &&
4474 (net->ro._s_addr)) {
4476 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4478 mtu -= sizeof(struct udphdr);
4480 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4481 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4484 } else if (ro->ro_rt == NULL) {
4485 /* route was freed */
4486 if (net->ro._s_addr &&
4487 net->src_addr_selected) {
4488 sctp_free_ifa(net->ro._s_addr);
4489 net->ro._s_addr = NULL;
4491 net->src_addr_selected = 0;
4500 uint32_t flowlabel, flowinfo;
4501 struct ip6_hdr *ip6h;
4502 struct route_in6 ip6route;
4503 #if !(defined(__Panda__) || defined(__Userspace__))
4506 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4508 #ifdef SCTP_EMBEDDED_V6_SCOPE
4509 struct sockaddr_in6 lsa6_storage;
4512 u_short prev_port = 0;
4516 flowlabel = net->flowlabel;
4518 flowlabel = stcb->asoc.default_flowlabel;
4520 flowlabel = inp->sctp_ep.default_flowlabel;
4522 if (flowlabel == 0) {
4524 * This means, in particular, that it is not set at the
4525 * SCTP layer. So use the value from the IP layer.
4527 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4528 flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4530 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4533 flowlabel &= 0x000fffff;
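/* Keep only the 20-bit IPv6 flow label; the traffic class travels separately in tos_value. */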
4534 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4536 len += sizeof(struct udphdr);
4538 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4541 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4544 SCTP_ALIGN_TO_END(newm, len);
4545 SCTP_BUF_LEN(newm) = len;
4546 SCTP_BUF_NEXT(newm) = m;
4548 #if defined(__FreeBSD__)
4551 if (net->flowidset == 0) {
4552 panic("Flow ID not set");
4555 m->m_pkthdr.flowid = net->flowid;
4556 m->m_flags |= M_FLOWID;
4558 if (use_mflowid != 0) {
4559 m->m_pkthdr.flowid = mflowid;
4560 m->m_flags |= M_FLOWID;
4564 packet_length = sctp_calculate_len(m);
4566 ip6h = mtod(m, struct ip6_hdr *);
4567 /* protect *sin6 from overwrite */
4568 sin6 = (struct sockaddr_in6 *)to;
4572 #ifdef SCTP_EMBEDDED_V6_SCOPE
4573 /* KAME hack: embed scopeid */
4574 #if defined(__APPLE__)
4575 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4576 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4578 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4580 #elif defined(SCTP_KAME)
4581 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4583 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4586 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4589 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4591 memset(&ip6route, 0, sizeof(ip6route));
4592 ro = (sctp_route_t *)&ip6route;
4593 #ifdef HAVE_SIN6_LEN
4594 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4596 memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4599 ro = (sctp_route_t *)&net->ro;
4602 * We assume here that inp_flow is in host byte order within
4605 if (tos_value == 0) {
4607 * This means, in particular, that it is not set at the
4608 * SCTP layer. So use the value from the IP layer.
4610 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4611 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4612 tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4614 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4620 tos_value |= sctp_get_ect(stcb);
4624 flowinfo |= tos_value;
4626 flowinfo |= flowlabel;
4627 ip6h->ip6_flow = htonl(flowinfo);
4629 ip6h->ip6_nxt = IPPROTO_UDP;
4631 ip6h->ip6_nxt = IPPROTO_SCTP;
4633 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4634 ip6h->ip6_dst = sin6->sin6_addr;
4637 * Add SRC address selection here: we can only reuse to a
4638 * limited degree the kame src-addr-sel, since we can try
4639 * their selection but it may not be bound.
4641 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4642 lsa6_tmp.sin6_family = AF_INET6;
4643 #ifdef HAVE_SIN6_LEN
4644 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4647 if (net && out_of_asoc_ok == 0) {
4648 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4649 sctp_free_ifa(net->ro._s_addr);
4650 net->ro._s_addr = NULL;
4651 net->src_addr_selected = 0;
4657 if (net->src_addr_selected == 0) {
4658 #ifdef SCTP_EMBEDDED_V6_SCOPE
4659 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4660 /* KAME hack: embed scopeid */
4661 #if defined(__APPLE__)
4662 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4663 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4665 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4667 #elif defined(SCTP_KAME)
4668 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4670 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4673 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4676 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4677 /* Cache the source address */
4678 net->ro._s_addr = sctp_source_address_selection(inp,
4684 #ifdef SCTP_EMBEDDED_V6_SCOPE
4686 (void)sa6_recoverscope(sin6);
4688 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4689 #endif /* SCTP_KAME */
4690 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4691 net->src_addr_selected = 1;
4693 if (net->ro._s_addr == NULL) {
4694 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4695 net->src_addr_selected = 0;
4696 sctp_handle_no_route(stcb, net, so_locked);
4697 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4699 return (EHOSTUNREACH);
4701 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4703 #ifdef SCTP_EMBEDDED_V6_SCOPE
4704 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4705 /* KAME hack: embed scopeid */
4706 #if defined(__APPLE__)
4707 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4708 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4710 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4712 #elif defined(SCTP_KAME)
4713 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4715 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4718 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4721 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4722 if (over_addr == NULL) {
4723 struct sctp_ifa *_lsrc;
4725 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4729 if (_lsrc == NULL) {
4730 sctp_handle_no_route(stcb, net, so_locked);
4731 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4733 return (EHOSTUNREACH);
4735 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4736 sctp_free_ifa(_lsrc);
4738 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4739 SCTP_RTALLOC(ro, vrf_id);
4741 #ifdef SCTP_EMBEDDED_V6_SCOPE
4743 (void)sa6_recoverscope(sin6);
4745 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4746 #endif /* SCTP_KAME */
4747 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4749 lsa6->sin6_port = inp->sctp_lport;
4751 if (ro->ro_rt == NULL) {
4753 * src addr selection failed to find a route (or
4754 * valid source addr), so we can't get there from
4757 sctp_handle_no_route(stcb, net, so_locked);
4758 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4760 return (EHOSTUNREACH);
4762 #ifndef SCOPEDROUTING
4763 #ifdef SCTP_EMBEDDED_V6_SCOPE
4765 * XXX: sa6 may not have a valid sin6_scope_id in the
4766 * non-SCOPEDROUTING case.
4768 bzero(&lsa6_storage, sizeof(lsa6_storage));
4769 lsa6_storage.sin6_family = AF_INET6;
4770 #ifdef HAVE_SIN6_LEN
4771 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4774 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4775 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4777 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4779 #endif /* SCTP_KAME */
4780 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4785 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4786 lsa6_storage.sin6_port = inp->sctp_lport;
4787 lsa6 = &lsa6_storage;
4788 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4789 #endif /* SCOPEDROUTING */
4790 ip6h->ip6_src = lsa6->sin6_addr;
4793 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4794 sctp_handle_no_route(stcb, net, so_locked);
4795 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4797 return (EHOSTUNREACH);
4799 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4800 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4801 udp->uh_dport = port;
4802 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4804 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4806 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4809 sctphdr->src_port = src_port;
4810 sctphdr->dest_port = dest_port;
4811 sctphdr->v_tag = v_tag;
4812 sctphdr->checksum = 0;
4815 * We set the hop limit now since there is a good chance
4816 * that our ro pointer is now filled
4818 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4819 #if !(defined(__Panda__) || defined(__Userspace__))
4820 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4824 /* Copy to be sure something bad is not happening */
4825 sin6->sin6_addr = ip6h->ip6_dst;
4826 lsa6->sin6_addr = ip6h->ip6_src;
4829 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4830 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4831 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4832 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4833 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4835 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4836 /* preserve the port and scope for link local send */
4837 prev_scope = sin6->sin6_scope_id;
4838 prev_port = sin6->sin6_port;
4841 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4842 /* failed to prepend data, give up */
4844 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4847 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4849 #if defined(SCTP_WITH_NO_CSUM)
4850 SCTP_STAT_INCR(sctps_sendnocrc);
4852 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4853 SCTP_STAT_INCR(sctps_sendswcrc);
4855 #if defined(__Windows__)
4857 #elif !defined(__Userspace__)
4858 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4859 udp->uh_sum = 0xffff;
4863 #if defined(SCTP_WITH_NO_CSUM)
4864 SCTP_STAT_INCR(sctps_sendnocrc);
4866 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4867 #if __FreeBSD_version < 900000
4868 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4869 SCTP_STAT_INCR(sctps_sendswcrc);
4871 #if __FreeBSD_version > 901000
4872 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4874 m->m_pkthdr.csum_flags = CSUM_SCTP;
4876 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4877 SCTP_STAT_INCR(sctps_sendhwcrc);
4880 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4881 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4882 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4883 SCTP_STAT_INCR(sctps_sendswcrc);
4885 SCTP_STAT_INCR(sctps_sendnocrc);
4890 /* send it out. table id is taken from stcb */
4891 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4892 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4893 so = SCTP_INP_SO(inp);
4894 SCTP_SOCKET_UNLOCK(so, 0);
4897 #ifdef SCTP_PACKET_LOGGING
4898 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4899 sctp_packet_log(o_pak);
4901 #if !(defined(__Panda__) || defined(__Userspace__))
4902 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4904 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4906 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4907 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4908 atomic_add_int(&stcb->asoc.refcnt, 1);
4909 SCTP_TCB_UNLOCK(stcb);
4910 SCTP_SOCKET_LOCK(so, 0);
4911 SCTP_TCB_LOCK(stcb);
4912 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4916 /* for link local this must be done */
4917 sin6->sin6_scope_id = prev_scope;
4918 sin6->sin6_port = prev_port;
4920 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4921 SCTP_STAT_INCR(sctps_sendpackets);
4922 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4924 SCTP_STAT_INCR(sctps_senderrors);
4927 /* Now if we had a temp route free it */
4928 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
4937 /* PMTU check versus smallest asoc MTU goes here */
4938 if (ro->ro_rt == NULL) {
4939 /* Route was freed */
4940 if (net->ro._s_addr &&
4941 net->src_addr_selected) {
4942 sctp_free_ifa(net->ro._s_addr);
4943 net->ro._s_addr = NULL;
4945 net->src_addr_selected = 0;
4947 if ((ro->ro_rt != NULL) &&
4948 (net->ro._s_addr)) {
4950 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4952 (stcb->asoc.smallest_mtu > mtu)) {
4953 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4956 net->mtu -= sizeof(struct udphdr);
4960 #if !defined(__Panda__) && !defined(__Userspace__)
4962 #if defined(__Windows__)
4963 #define ND_IFINFO(ifp) (ifp)
4964 #define linkmtu if_mtu
4966 if (ND_IFINFO(ifp)->linkmtu &&
4967 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4968 sctp_mtu_size_reset(inp,
4970 ND_IFINFO(ifp)->linkmtu);
4978 #if defined(__Userspace__)
4982 struct sockaddr_conn *sconn;
4985 sconn = (struct sockaddr_conn *)to;
4986 len = sizeof(struct sctphdr);
4987 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4990 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4993 SCTP_ALIGN_TO_END(newm, len);
4994 SCTP_BUF_LEN(newm) = len;
4995 SCTP_BUF_NEXT(newm) = m;
4997 packet_length = sctp_calculate_len(m);
4998 sctphdr = mtod(m, struct sctphdr *);
4999 sctphdr->src_port = src_port;
5000 sctphdr->dest_port = dest_port;
5001 sctphdr->v_tag = v_tag;
5002 sctphdr->checksum = 0;
5003 #if defined(SCTP_WITH_NO_CSUM)
5004 SCTP_STAT_INCR(sctps_sendnocrc);
5006 sctphdr->checksum = sctp_calculate_cksum(m, 0);
5007 SCTP_STAT_INCR(sctps_sendswcrc);
5009 if (tos_value == 0) {
5010 tos_value = inp->ip_inp.inp.inp_ip_tos;
5014 tos_value |= sctp_get_ect(stcb);
5016 /* Don't alloc/free for each packet */
5017 if ((buffer = malloc(packet_length)) != NULL) {
5018 m_copydata(m, 0, packet_length, buffer);
5019 ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
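/*
 * Userspace transport: the finished packet is flattened into a
 * contiguous buffer and handed to the application-registered
 * conn_output callback together with the ECN-marked TOS byte and
 * the don't-fragment hint.
 */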
5029 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5030 ((struct sockaddr *)to)->sa_family);
5032 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5039 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
5040 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5046 struct sctp_nets *net;
5047 struct sctp_init_chunk *init;
5048 struct sctp_supported_addr_param *sup_addr;
5049 struct sctp_adaptation_layer_indication *ali;
5050 struct sctp_supported_chunk_types_param *pr_supported;
5051 struct sctp_paramhdr *ph;
5052 int cnt_inits_to = 0;
5054 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5056 #if defined(__APPLE__)
5058 sctp_lock_assert(SCTP_INP_SO(inp));
5060 sctp_unlock_assert(SCTP_INP_SO(inp));
5063 /* INIT's always go to the primary (and usually ONLY address) */
5064 net = stcb->asoc.primary_destination;
5066 net = TAILQ_FIRST(&stcb->asoc.nets);
5071 /* we confirm any address we send an INIT to */
5072 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5073 (void)sctp_set_primary_addr(stcb, NULL, net);
5075 /* we confirm any address we send an INIT to */
5076 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5078 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5080 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5082 * special hook: if we are sending to link local it will not
5083 * show up in our private address count.
5085 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5089 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5090 /* This case should not happen */
5091 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5094 /* start the INIT timer */
5095 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5097 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5099 /* No memory, INIT timer will re-attempt. */
5100 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5103 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5106 * assume peer supports asconf in order to be able to queue
5107 * local address changes while an INIT is in flight and before
5108 * the assoc is established.
5110 stcb->asoc.peer_supports_asconf = 1;
5111 /* Now lets put the chunk header in place */
5112 init = mtod(m, struct sctp_init_chunk *);
5113 /* now the chunk header */
5114 init->ch.chunk_type = SCTP_INITIATION;
5115 init->ch.chunk_flags = 0;
5116 /* fill in later from mbuf we build */
5117 init->ch.chunk_length = 0;
5118 /* place in my tag */
5119 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5120 /* set up some of the credits. */
5121 init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5122 SCTP_MINIMAL_RWND));
5123 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5124 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5125 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
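/*
 * The fixed part of the INIT chunk is now filled in (vtag, a_rwnd,
 * stream counts, initial TSN). The optional parameters that follow -
 * supported address types, adaptation layer indication, NAT support,
 * cookie preservative, ECN, PR-SCTP, supported extensions and the
 * AUTH parameters - are each padded to a 4-byte boundary before the
 * next one is written at mtod(m, caddr_t) + chunk_len.
 */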
5127 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5130 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5131 if (stcb->asoc.scope.ipv4_addr_legal) {
5132 parameter_len += (uint16_t)sizeof(uint16_t);
5134 if (stcb->asoc.scope.ipv6_addr_legal) {
5135 parameter_len += (uint16_t)sizeof(uint16_t);
5137 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5138 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5139 sup_addr->ph.param_length = htons(parameter_len);
5141 if (stcb->asoc.scope.ipv4_addr_legal) {
5142 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5144 if (stcb->asoc.scope.ipv6_addr_legal) {
5145 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5147 padding_len = 4 - 2 * i;
5148 chunk_len += parameter_len;
5151 /* Adaptation layer indication parameter */
5152 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5153 if (padding_len > 0) {
5154 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5155 chunk_len += padding_len;
5158 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5159 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5160 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5161 ali->ph.param_length = htons(parameter_len);
5162 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5163 chunk_len += parameter_len;
5166 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5167 /* Add NAT friendly parameter. */
5168 if (padding_len > 0) {
5169 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5170 chunk_len += padding_len;
5173 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5174 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5175 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5176 ph->param_length = htons(parameter_len);
5177 chunk_len += parameter_len;
5180 /* now any cookie time extensions */
5181 if (stcb->asoc.cookie_preserve_req) {
5182 struct sctp_cookie_perserve_param *cookie_preserve;
5184 if (padding_len > 0) {
5185 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5186 chunk_len += padding_len;
5189 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5190 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5191 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5192 cookie_preserve->ph.param_length = htons(parameter_len);
5193 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5194 stcb->asoc.cookie_preserve_req = 0;
5195 chunk_len += parameter_len;
5199 if (stcb->asoc.ecn_allowed == 1) {
5200 if (padding_len > 0) {
5201 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5202 chunk_len += padding_len;
5205 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5206 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5207 ph->param_type = htons(SCTP_ECN_CAPABLE);
5208 ph->param_length = htons(parameter_len);
5209 chunk_len += parameter_len;
5212 /* And now tell the peer we do support PR-SCTP. */
5213 if (padding_len > 0) {
5214 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5215 chunk_len += padding_len;
5218 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5219 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5220 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5221 ph->param_length = htons(parameter_len);
5222 chunk_len += parameter_len;
5224 /* And now tell the peer we do all the extensions */
5225 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5226 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5228 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5229 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5230 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5231 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5232 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5233 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5234 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5236 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
5237 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5239 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5240 pr_supported->ph.param_length = htons(parameter_len);
5241 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5242 chunk_len += parameter_len;
5244 /* add authentication parameters */
5245 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5246 /* attach RANDOM parameter, if available */
5247 if (stcb->asoc.authinfo.random != NULL) {
5248 struct sctp_auth_random *randp;
5250 if (padding_len > 0) {
5251 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5252 chunk_len += padding_len;
5255 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5256 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5257 /* random key already contains the header */
5258 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5259 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5260 chunk_len += parameter_len;
5262 /* add HMAC_ALGO parameter */
5263 if ((stcb->asoc.local_hmacs != NULL) &&
5264 (stcb->asoc.local_hmacs->num_algo > 0)) {
5265 struct sctp_auth_hmac_algo *hmacs;
5267 if (padding_len > 0) {
5268 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5269 chunk_len += padding_len;
5272 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5273 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5274 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5275 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5276 hmacs->ph.param_length = htons(parameter_len);
5277 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5278 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5279 chunk_len += parameter_len;
5281 /* add CHUNKS parameter */
5282 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
5283 struct sctp_auth_chunk_list *chunks;
5285 if (padding_len > 0) {
5286 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5287 chunk_len += padding_len;
5290 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5291 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5292 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5293 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5294 chunks->ph.param_length = htons(parameter_len);
5295 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5296 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5297 chunk_len += parameter_len;
5300 SCTP_BUF_LEN(m) = chunk_len;
5302 /* now the addresses */
5303 /* To optimize this we could put the scoping stuff
5304 * into a structure and remove the individual uint8's from
5305 * the assoc structure. Then we could just sifa in the
5306 * address within the stcb. But for now this is a quick
5307 * hack to get the address stuff teased apart.
5309 sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
5311 init->ch.chunk_length = htons(chunk_len);
5312 if (padding_len > 0) {
5313 struct mbuf *m_at, *mp_last;
5316 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
5317 if (SCTP_BUF_NEXT(m_at) == NULL)
5320 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
5325 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5326 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
5327 (struct sockaddr *)&net->ro._l_addr,
5328 m, 0, NULL, 0, 0, 0, 0,
5329 inp->sctp_lport, stcb->rport, htonl(0),
5331 #if defined(__FreeBSD__)
5335 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
5336 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5337 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5341 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5342 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
5345 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
5346 * being equal to the beginning of the params i.e. (iphlen +
5347 * sizeof(struct sctp_init_msg) parse through the parameters to the
5348 * end of the mbuf verifying that all parameters are known.
5350 * For unknown parameters build and return a mbuf with
5351 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
5352 * processing this chunk, stop and set *abort_processing to 1.
5354 * By having param_offset be pre-set to where parameters begin it is
5355 * hoped that this routine may be reused in the future by new
5358 struct sctp_paramhdr *phdr, params;
5360 struct mbuf *mat, *op_err;
5361 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
5362 int at, limit, pad_needed;
5363 uint16_t ptype, plen, padded_size;
5366 *abort_processing = 0;
5369 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5372 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5373 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5374 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5375 ptype = ntohs(phdr->param_type);
5376 plen = ntohs(phdr->param_length);
5377 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5378 /* whacked parameter */
5379 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5382 limit -= SCTP_SIZE32(plen);
5384 * All parameters for all chunks that we know/understand are
5385 * listed here. We process them other places and make
5386 * appropriate stop actions per the upper bits. However, this
5387 * is the generic routine that processors can call to get back
5388 * an operr to either incorporate (init-ack) or send.
5390 padded_size = SCTP_SIZE32(plen);
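/*
 * Sizes are checked against the length rounded up to a multiple of 4,
 * i.e. the padded size the sender must have used on the wire.
 */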
5392 /* Params with variable size */
5393 case SCTP_HEARTBEAT_INFO:
5394 case SCTP_STATE_COOKIE:
5395 case SCTP_UNRECOG_PARAM:
5396 case SCTP_ERROR_CAUSE_IND:
5400 /* Params with variable size within a range */
5401 case SCTP_CHUNK_LIST:
5402 case SCTP_SUPPORTED_CHUNK_EXT:
5403 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5404 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5409 case SCTP_SUPPORTED_ADDRTYPE:
5410 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5411 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5417 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5418 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5423 case SCTP_SET_PRIM_ADDR:
5424 case SCTP_DEL_IP_ADDRESS:
5425 case SCTP_ADD_IP_ADDRESS:
5426 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5427 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5428 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5433 /* Params with a fixed size */
5434 case SCTP_IPV4_ADDRESS:
5435 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5436 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5441 case SCTP_IPV6_ADDRESS:
5442 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5443 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5448 case SCTP_COOKIE_PRESERVE:
5449 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5450 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5455 case SCTP_HAS_NAT_SUPPORT:
5458 case SCTP_PRSCTP_SUPPORTED:
5460 if (padded_size != sizeof(struct sctp_paramhdr)) {
5461 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5466 case SCTP_ECN_CAPABLE:
5467 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
5468 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5473 case SCTP_ULP_ADAPTATION:
5474 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5475 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5480 case SCTP_SUCCESS_REPORT:
5481 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5482 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5487 case SCTP_HOSTNAME_ADDRESS:
5489 /* We can NOT handle HOST NAME addresses!! */
5492 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5493 *abort_processing = 1;
5494 if (op_err == NULL) {
5495 /* Ok need to try to get a mbuf */
5497 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5499 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5502 l_len += sizeof(struct sctp_paramhdr);
5503 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5505 SCTP_BUF_LEN(op_err) = 0;
5507 * pre-reserve space for ip and sctp
5508 * header and chunk hdr
5511 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5513 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5515 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5516 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5520 /* If we have space */
5521 struct sctp_paramhdr s;
5524 uint32_t cpthis = 0;
5526 pad_needed = 4 - (err_at % 4);
5527 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5528 err_at += pad_needed;
5530 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5531 s.param_length = htons(sizeof(s) + plen);
5532 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5533 err_at += sizeof(s);
5534 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
5536 sctp_m_freem(op_err);
5538 * we are out of memory but we still
5539 * need to have a look at what to do
5540 * (the system is in trouble
5545 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5552 * we do not recognize the parameter figure out what
5555 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
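/*
 * Per RFC 4960 the two high-order bits of an unrecognized parameter
 * type encode the required action: 0x4000 set means report it in an
 * Unrecognized Parameter cause, 0x8000 set means skip it and keep
 * processing, while 0x8000 clear means stop processing the chunk.
 */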
5556 if ((ptype & 0x4000) == 0x4000) {
5557 /* Report bit is set?? */
5558 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5559 if (op_err == NULL) {
5561 /* Ok need to try to get an mbuf */
5563 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5565 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5568 l_len += sizeof(struct sctp_paramhdr);
5569 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5571 SCTP_BUF_LEN(op_err) = 0;
5573 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5575 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5577 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5578 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5582 /* If we have space */
5583 struct sctp_paramhdr s;
5586 uint32_t cpthis = 0;
5588 pad_needed = 4 - (err_at % 4);
5589 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5590 err_at += pad_needed;
5592 s.param_type = htons(SCTP_UNRECOG_PARAM);
5593 s.param_length = htons(sizeof(s) + plen);
5594 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5595 err_at += sizeof(s);
5596 if (plen > sizeof(tempbuf)) {
5597 plen = sizeof(tempbuf);
5599 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
5601 sctp_m_freem(op_err);
5603 * we are out of memory but
5604 * we still need to have a
5605 * look at what to do (the
5606 * system is in trouble
5610 goto more_processing;
5612 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5617 if ((ptype & 0x8000) == 0x0000) {
5618 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5621 /* skip this chunk and continue processing */
5622 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5623 at += SCTP_SIZE32(plen);
5628 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5632 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5633 *abort_processing = 1;
5634 if ((op_err == NULL) && phdr) {
5637 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5639 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5641 l_len += (2 * sizeof(struct sctp_paramhdr));
5642 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5644 SCTP_BUF_LEN(op_err) = 0;
5646 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5648 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5650 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5651 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5654 if ((op_err) && phdr) {
5655 struct sctp_paramhdr s;
5658 uint32_t cpthis = 0;
5660 pad_needed = 4 - (err_at % 4);
5661 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5662 err_at += pad_needed;
5664 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5665 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5666 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5667 err_at += sizeof(s);
5668 /* Only copy back the p-hdr that caused the issue */
5669 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5675 sctp_are_there_new_addresses(struct sctp_association *asoc,
5676 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5679 * Given a INIT packet, look through the packet to verify that there
5680 * are NO new addresses. As we go through the parameters add reports
5681 * of any un-understood parameters that require an error. Also we
5682 * must return (1) to drop the packet if we see an un-understood
5683 * parameter that tells us to drop the chunk.
5685 struct sockaddr *sa_touse;
5686 struct sockaddr *sa;
5687 struct sctp_paramhdr *phdr, params;
5688 uint16_t ptype, plen;
5690 struct sctp_nets *net;
5692 struct sockaddr_in sin4, *sa4;
5695 struct sockaddr_in6 sin6, *sa6;
5699 memset(&sin4, 0, sizeof(sin4));
5700 sin4.sin_family = AF_INET;
5702 sin4.sin_len = sizeof(sin4);
5706 memset(&sin6, 0, sizeof(sin6));
5707 sin6.sin6_family = AF_INET6;
5708 #ifdef HAVE_SIN6_LEN
5709 sin6.sin6_len = sizeof(sin6);
5712 /* First what about the src address of the pkt ? */
5714 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5715 sa = (struct sockaddr *)&net->ro._l_addr;
5716 if (sa->sa_family == src->sa_family) {
5718 if (sa->sa_family == AF_INET) {
5719 struct sockaddr_in *src4;
5721 sa4 = (struct sockaddr_in *)sa;
5722 src4 = (struct sockaddr_in *)src;
5723 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5730 if (sa->sa_family == AF_INET6) {
5731 struct sockaddr_in6 *src6;
5733 sa6 = (struct sockaddr_in6 *)sa;
5734 src6 = (struct sockaddr_in6 *)src;
5735 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5744 /* New address added! no need to look further. */
5747 /* Ok so far lets munge through the rest of the packet */
5748 offset += sizeof(struct sctp_init_chunk);
5749 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5752 ptype = ntohs(phdr->param_type);
5753 plen = ntohs(phdr->param_length);
5756 case SCTP_IPV4_ADDRESS:
5758 struct sctp_ipv4addr_param *p4, p4_buf;
5760 phdr = sctp_get_next_param(in_initpkt, offset,
5761 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5762 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5766 p4 = (struct sctp_ipv4addr_param *)phdr;
5767 sin4.sin_addr.s_addr = p4->addr;
5768 sa_touse = (struct sockaddr *)&sin4;
5773 case SCTP_IPV6_ADDRESS:
5775 struct sctp_ipv6addr_param *p6, p6_buf;
5777 phdr = sctp_get_next_param(in_initpkt, offset,
5778 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5779 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5783 p6 = (struct sctp_ipv6addr_param *)phdr;
5784 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5786 sa_touse = (struct sockaddr *)&sin6;
5795 /* ok, sa_touse points to one to check */
5797 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5798 sa = (struct sockaddr *)&net->ro._l_addr;
5799 if (sa->sa_family != sa_touse->sa_family) {
5803 if (sa->sa_family == AF_INET) {
5804 sa4 = (struct sockaddr_in *)sa;
5805 if (sa4->sin_addr.s_addr ==
5806 sin4.sin_addr.s_addr) {
5813 if (sa->sa_family == AF_INET6) {
5814 sa6 = (struct sockaddr_in6 *)sa;
5815 if (SCTP6_ARE_ADDR_EQUAL(
5824 /* New addr added! no need to look further */
5828 offset += SCTP_SIZE32(plen);
5829 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5835 * Given a MBUF chain that was sent into us containing an INIT. Build a
5836 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
5837 * a pullup to include IPv6/4header, SCTP header and initial part of INIT
5838 * message (i.e. the struct sctp_init_msg).
5841 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5842 struct mbuf *init_pkt, int iphlen, int offset,
5843 struct sockaddr *src, struct sockaddr *dst,
5844 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5845 #if defined(__FreeBSD__)
5846 uint8_t use_mflowid, uint32_t mflowid,
5848 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5850 struct sctp_association *asoc;
5851 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5852 struct sctp_init_ack_chunk *initack;
5853 struct sctp_adaptation_layer_indication *ali;
5854 struct sctp_ecn_supported_param *ecn;
5855 struct sctp_prsctp_supported_param *prsctp;
5856 struct sctp_supported_chunk_types_param *pr_supported;
5857 union sctp_sockstore *over_addr;
5859 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5860 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5861 struct sockaddr_in *sin;
5864 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5865 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5866 struct sockaddr_in6 *sin6;
5868 #if defined(__Userspace__)
5869 struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5870 struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5871 struct sockaddr_conn *sconn;
5873 struct sockaddr *to;
5874 struct sctp_state_cookie stc;
5875 struct sctp_nets *net = NULL;
5876 uint8_t *signature = NULL;
5877 int cnt_inits_to = 0;
5878 uint16_t his_limit, i_want;
5879 int abort_flag, padval;
5882 int nat_friendly = 0;
5891 if ((asoc != NULL) &&
5892 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5893 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5894 /* new addresses, out of here in non-cookie-wait states */
5896 * Send an ABORT; we don't add the new address error clause,
5897 * though we even set the T bit and copy in the 0 tag. This
5898 * looks no different than if no listener was present.
5900 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5902 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5903 #if defined(__FreeBSD__)
5904 use_mflowid, mflowid,
5910 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5911 (offset + sizeof(struct sctp_init_chunk)),
5912 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5915 if (op_err == NULL) {
5916 char msg[SCTP_DIAG_INFO_LEN];
5918 snprintf(msg, sizeof(msg), "%s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5919 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5922 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5923 init_chk->init.initiate_tag, op_err,
5924 #if defined(__FreeBSD__)
5925 use_mflowid, mflowid,
5930 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5932 /* No memory, INIT timer will re-attempt. */
5934 sctp_m_freem(op_err);
5937 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5940 * We might not overwrite the identification[] completely and on
5941 * some platforms time_entered will contain some padding.
5942 * Therefore zero out the cookie to avoid putting
5943 * uninitialized memory on the wire.
5945 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5947 /* the time I built cookie */
5948 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5950 /* populate any tie tags */
5952 /* unlock before tag selections */
5953 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5954 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5955 stc.cookie_life = asoc->cookie_life;
5956 net = asoc->primary_destination;
5958 stc.tie_tag_my_vtag = 0;
5959 stc.tie_tag_peer_vtag = 0;
5960 /* life I will award this cookie */
5961 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5964 /* copy in the ports for later check */
5965 stc.myport = sh->dest_port;
5966 stc.peerport = sh->src_port;
5969 * If we wanted to honor cookie life extensions, we would add to
5970 * stc.cookie_life. For now we should NOT honor any extension
5972 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5973 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5974 stc.ipv6_addr_legal = 1;
5975 if (SCTP_IPV6_V6ONLY(inp)) {
5976 stc.ipv4_addr_legal = 0;
5978 stc.ipv4_addr_legal = 1;
5980 #if defined(__Userspace__)
5981 stc.conn_addr_legal = 0;
5984 stc.ipv6_addr_legal = 0;
5985 #if defined(__Userspace__)
5986 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
5987 stc.conn_addr_legal = 1;
5988 stc.ipv4_addr_legal = 0;
5990 stc.conn_addr_legal = 0;
5991 stc.ipv4_addr_legal = 1;
5994 stc.ipv4_addr_legal = 1;
5997 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
6004 switch (dst->sa_family) {
6008 /* lookup address */
6009 stc.address[0] = src4->sin_addr.s_addr;
6013 stc.addr_type = SCTP_IPV4_ADDRESS;
6014 /* local from address */
6015 stc.laddress[0] = dst4->sin_addr.s_addr;
6016 stc.laddress[1] = 0;
6017 stc.laddress[2] = 0;
6018 stc.laddress[3] = 0;
6019 stc.laddr_type = SCTP_IPV4_ADDRESS;
6020 /* scope_id is only for v6 */
6022 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
6023 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
6028 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
6029 /* Must use the address in this case */
6030 if (sctp_is_address_on_local_host(src, vrf_id)) {
6031 stc.loopback_scope = 1;
6034 stc.local_scope = 0;
6042 stc.addr_type = SCTP_IPV6_ADDRESS;
6043 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6044 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
6045 stc.scope_id = in6_getscope(&src6->sin6_addr);
6049 if (sctp_is_address_on_local_host(src, vrf_id)) {
6050 stc.loopback_scope = 1;
6051 stc.local_scope = 0;
6054 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
6056 * If the new destination is a LINK_LOCAL we
6057 * must have both site and local scope in
6058 * common. Don't set local scope though, since
6059 * we must depend on the source to be added
6060 * implicitly. We cannot assume just because
6061 * we share one link that all links are
6064 #if defined(__APPLE__)
6065 /* Mac OS X currently doesn't have in6_getscope() */
6066 stc.scope_id = src6->sin6_addr.s6_addr16[1];
6068 stc.local_scope = 0;
6072 * we start counting for the private address
6073 * stuff at 1. since the link local we
6074 * source from won't show up in our scoped
6078 /* pull out the scope_id from incoming pkt */
6079 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
6081 * If the new destination is SITE_LOCAL then
6082 * we must have site scope in common.
6086 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6087 stc.laddr_type = SCTP_IPV6_ADDRESS;
6091 #if defined(__Userspace__)
6094 /* lookup address */
6099 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6100 stc.addr_type = SCTP_CONN_ADDRESS;
6101 /* local from address */
6102 stc.laddress[0] = 0;
6103 stc.laddress[1] = 0;
6104 stc.laddress[2] = 0;
6105 stc.laddress[3] = 0;
6106 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6107 stc.laddr_type = SCTP_CONN_ADDRESS;
6108 /* scope_id is only for v6 */
6119 /* set the scope per the existing tcb */
6122 struct sctp_nets *lnet;
6125 stc.loopback_scope = asoc->scope.loopback_scope;
6126 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6127 stc.site_scope = asoc->scope.site_scope;
6128 stc.local_scope = asoc->scope.local_scope;
6130 /* Why do we not consider IPv4 LL addresses? */
6131 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6132 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6133 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6135 * if we have a LL address, start
6143 /* use the net pointer */
6144 to = (struct sockaddr *)&net->ro._l_addr;
6145 switch (to->sa_family) {
6148 sin = (struct sockaddr_in *)to;
6149 stc.address[0] = sin->sin_addr.s_addr;
6153 stc.addr_type = SCTP_IPV4_ADDRESS;
6154 if (net->src_addr_selected == 0) {
6156 * strange case here, the INIT should have
6157 * done the selection.
6159 net->ro._s_addr = sctp_source_address_selection(inp,
6160 stcb, (sctp_route_t *)&net->ro,
6162 if (net->ro._s_addr == NULL)
6165 net->src_addr_selected = 1;
6168 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6169 stc.laddress[1] = 0;
6170 stc.laddress[2] = 0;
6171 stc.laddress[3] = 0;
6172 stc.laddr_type = SCTP_IPV4_ADDRESS;
6173 /* scope_id is only for v6 */
6179 sin6 = (struct sockaddr_in6 *)to;
6180 memcpy(&stc.address, &sin6->sin6_addr,
6181 sizeof(struct in6_addr));
6182 stc.addr_type = SCTP_IPV6_ADDRESS;
6183 stc.scope_id = sin6->sin6_scope_id;
6184 if (net->src_addr_selected == 0) {
6186 * strange case here, the INIT should have
6187 * done the selection.
6189 net->ro._s_addr = sctp_source_address_selection(inp,
6190 stcb, (sctp_route_t *)&net->ro,
6192 if (net->ro._s_addr == NULL)
6195 net->src_addr_selected = 1;
6197 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6198 sizeof(struct in6_addr));
6199 stc.laddr_type = SCTP_IPV6_ADDRESS;
6202 #if defined(__Userspace__)
6204 sconn = (struct sockaddr_conn *)to;
6209 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6210 stc.addr_type = SCTP_CONN_ADDRESS;
6211 stc.laddress[0] = 0;
6212 stc.laddress[1] = 0;
6213 stc.laddress[2] = 0;
6214 stc.laddress[3] = 0;
6215 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6216 stc.laddr_type = SCTP_CONN_ADDRESS;
6222 /* Now let's put the SCTP header in place */
6223 initack = mtod(m, struct sctp_init_ack_chunk *);
6224 /* Save it off for quick ref */
6225 stc.peers_vtag = init_chk->init.initiate_tag;
6227 memcpy(stc.identification, SCTP_VERSION_STRING,
6228 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6229 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6230 /* now the chunk header */
6231 initack->ch.chunk_type = SCTP_INITIATION_ACK;
6232 initack->ch.chunk_flags = 0;
6233 /* fill in later from mbuf we build */
6234 initack->ch.chunk_length = 0;
6235 /* place in my tag */
6236 if ((asoc != NULL) &&
6237 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
6238 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
6239 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
6240 /* re-use the v-tags and init-seq here */
6241 initack->init.initiate_tag = htonl(asoc->my_vtag);
6242 initack->init.initial_tsn = htonl(asoc->init_seq_number);
6244 uint32_t vtag, itsn;
6245 if (hold_inp_lock) {
6246 SCTP_INP_INCR_REF(inp);
6247 SCTP_INP_RUNLOCK(inp);
6250 atomic_add_int(&asoc->refcnt, 1);
6251 SCTP_TCB_UNLOCK(stcb);
6253 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6254 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
6255 /* Got a duplicate vtag from a peer behind a NAT;
6256 * make sure we don't use it.
6260 initack->init.initiate_tag = htonl(vtag);
6261 /* get a TSN to use too */
6262 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6263 initack->init.initial_tsn = htonl(itsn);
6264 SCTP_TCB_LOCK(stcb);
6265 atomic_add_int(&asoc->refcnt, -1);
6267 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6268 initack->init.initiate_tag = htonl(vtag);
6269 /* get a TSN to use too */
6270 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6272 if (hold_inp_lock) {
6273 SCTP_INP_RLOCK(inp);
6274 SCTP_INP_DECR_REF(inp);
6277 /* save away my tag into the cookie */
6278 stc.my_vtag = initack->init.initiate_tag;
6280 /* set up some of the credits. */
6281 so = inp->sctp_socket;
6283 /* memory problem */
6287 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
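/*
 * The advertised receive window is taken from the socket receive
 * buffer limit, but never allowed below SCTP_MINIMAL_RWND, so the
 * peer always sees a usable minimum window.
 */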
6289 /* set what I want */
6290 his_limit = ntohs(init_chk->init.num_inbound_streams);
6291 /* choose what I want */
6293 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
6294 i_want = asoc->streamoutcnt;
6296 i_want = inp->sctp_ep.pre_open_stream_count;
6299 i_want = inp->sctp_ep.pre_open_stream_count;
6301 if (his_limit < i_want) {
6302 /* I Want more :< */
6303 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6305 /* I can have what I want :> */
6306 initack->init.num_outbound_streams = htons(i_want);
6308 /* tell him his limit. */
6309 initack->init.num_inbound_streams =
6310 htons(inp->sctp_ep.max_open_streams_intome);
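/*
 * Stream negotiation: the outbound stream count we answer with is
 * capped by the peer's advertised MIS (his_limit), while
 * num_inbound_streams simply reports our own maximum back to the peer.
 */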
6312 /* adaptation layer indication parameter */
6313 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6314 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
6315 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6316 ali->ph.param_length = htons(sizeof(*ali));
6317 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
6318 SCTP_BUF_LEN(m) += sizeof(*ali);
6319 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
6321 ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
6325 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
6326 (inp->sctp_ecn_enable == 1)) {
6327 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
6328 ecn->ph.param_length = htons(sizeof(*ecn));
6329 SCTP_BUF_LEN(m) += sizeof(*ecn);
6331 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
6334 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
6336 /* And now tell the peer we do pr-sctp */
6337 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
6338 prsctp->ph.param_length = htons(sizeof(*prsctp));
6339 SCTP_BUF_LEN(m) += sizeof(*prsctp);
6341 /* Add NAT friendly parameter */
6342 struct sctp_paramhdr *ph;
6344 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6345 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6346 ph->param_length = htons(sizeof(struct sctp_paramhdr));
6347 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
6349 /* And now tell the peer we do all the extensions */
6350 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6351 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6353 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6354 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6355 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6356 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6357 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6358 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
6359 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6360 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
6361 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6362 p_len = sizeof(*pr_supported) + num_ext;
6363 pr_supported->ph.param_length = htons(p_len);
6364 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
6365 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
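/*
 * param_length carries the unpadded length; the mbuf is advanced by
 * SCTP_SIZE32(p_len), i.e. the length rounded up to a 4-byte boundary,
 * and the bzero above clears the pad bytes so no uninitialized memory
 * goes on the wire.
 */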
6367 /* add authentication parameters */
6368 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
6369 struct sctp_auth_random *randp;
6370 struct sctp_auth_hmac_algo *hmacs;
6371 struct sctp_auth_chunk_list *chunks;
6372 uint16_t random_len;
6374 /* generate and add RANDOM parameter */
6375 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6376 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6377 randp->ph.param_type = htons(SCTP_RANDOM);
6378 p_len = sizeof(*randp) + random_len;
6379 randp->ph.param_length = htons(p_len);
6380 SCTP_READ_RANDOM(randp->random_data, random_len);
6381 /* zero out any padding required */
6382 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
6383 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
6385 /* add HMAC_ALGO parameter */
6386 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6387 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6388 (uint8_t *) hmacs->hmac_ids);
6390 p_len += sizeof(*hmacs);
6391 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6392 hmacs->ph.param_length = htons(p_len);
6393 /* zero out any padding required */
6394 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
6395 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
6397 /* add CHUNKS parameter */
6398 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6399 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6400 chunks->chunk_types);
6402 p_len += sizeof(*chunks);
6403 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6404 chunks->ph.param_length = htons(p_len);
6405 /* zero out any padding required */
6406 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
6407 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
6411 /* now the addresses */
6413 struct sctp_scoping scp;
6414 /* To optimize this we could put the scoping stuff
6415 * into a structure and remove the individual uint8's from
6416 * the stc structure. Then we could just pass in the
6417 * address within the stc.. but for now this is a quick
6418 * hack to get the address stuff teased apart.
6420 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6421 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6422 #if defined(__Userspace__)
6423 scp.conn_addr_legal = stc.conn_addr_legal;
6425 scp.loopback_scope = stc.loopback_scope;
6426 scp.ipv4_local_scope = stc.ipv4_scope;
6427 scp.local_scope = stc.local_scope;
6428 scp.site_scope = stc.site_scope;
6429 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
6432 /* tack on the operational error if present */
6440 llen += SCTP_BUF_LEN(ol);
6441 ol = SCTP_BUF_NEXT(ol);
6444 /* must add a pad to the param */
6445 uint32_t cpthis = 0;
6448 padlen = 4 - (llen % 4);
6449 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
6451 while (SCTP_BUF_NEXT(m_at) != NULL) {
6452 m_at = SCTP_BUF_NEXT(m_at);
6454 SCTP_BUF_NEXT(m_at) = op_err;
6455 while (SCTP_BUF_NEXT(m_at) != NULL) {
6456 m_at = SCTP_BUF_NEXT(m_at);
6459 /* pre-calculate the size and update pkt header and chunk header */
6461 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6462 p_len += SCTP_BUF_LEN(m_tmp);
6463 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6464 /* m_tmp should now point to last one */
6469 /* Now we must build a cookie */
6470 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6471 if (m_cookie == NULL) {
6472 /* memory problem */
6476 /* Now append the cookie to the end and update the space/size */
6477 SCTP_BUF_NEXT(m_tmp) = m_cookie;
6479 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6480 p_len += SCTP_BUF_LEN(m_tmp);
6481 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6482 /* m_tmp should now point to last one */
6487 /* Place in the size, but we don't include
6488 * the last pad (if any) in the INIT-ACK.
6490 initack->ch.chunk_length = htons(p_len);
6492 /* Time to sign the cookie. We don't sign over the cookie
6493 * signature itself, thus we set it as the trailer.
6495 (void)sctp_hmac_m(SCTP_HMAC,
6496 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6497 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6498 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
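/*
 * The HMAC covers the cookie starting just past its parameter header
 * and stops short of the trailing SCTP_SIGNATURE_SIZE bytes; the
 * resulting digest is written into the signature area that
 * sctp_add_cookie() handed back.
 */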
6500 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6501 * here since the timer will drive a retransmission.
6504 if ((padval) && (mp_last)) {
6505 /* see my previous comments on mp_last */
6506 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
6507 /* Houston we have a problem, no space */
6512 if (stc.loopback_scope) {
6513 over_addr = (union sctp_sockstore *)dst;
6518 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6520 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6522 #if defined(__FreeBSD__)
6523 use_mflowid, mflowid,
6525 SCTP_SO_NOT_LOCKED);
6526 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6531 sctp_prune_prsctp(struct sctp_tcb *stcb,
6532 struct sctp_association *asoc,
6533 struct sctp_sndrcvinfo *srcv,
6537 struct sctp_tmit_chunk *chk, *nchk;
6539 SCTP_TCB_LOCK_ASSERT(stcb);
6540 if ((asoc->peer_supports_prsctp) &&
6541 (asoc->sent_queue_cnt_removeable > 0)) {
6542 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6544 * Look for chunks marked with the PR_SCTP flag AND
6545 * the buffer space flag. If the one being sent is of
6546 * equal or greater priority then purge the old one
6547 * and free some space.
6549 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6551 * This one is PR-SCTP AND buffer space
6554 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6556 * Lower numbers equate to higher
6557 * priority, so if the one we are
6558 * looking at has a larger or equal
6559 * priority we want to drop the data
6560 * and NOT retransmit it.
6564 * We release the book_size
6565 * if the mbuf is here
6570 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6574 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6577 freed_spc += ret_spc;
6578 if (freed_spc >= dataout) {
6581 } /* if chunk was present */
6582 } /* if of sufficient priority */
6583 } /* if chunk has PR-SCTP buffer mode enabled */
6584 } /* tailqforeach */
6586 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6587 /* Here we must move to the sent queue and mark */
6588 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6589 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6592 * We release the book_size
6593 * if the mbuf is here
6597 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6600 freed_spc += ret_spc;
6601 if (freed_spc >= dataout) {
6604 } /* end if chk->data */
6605 } /* end if right class */
6606 } /* end if chk pr-sctp */
6607 } /* tailqforeachsafe (chk) */
6608 } /* if enabled in asoc */
6612 sctp_get_frag_point(struct sctp_tcb *stcb,
6613 struct sctp_association *asoc)
6618 * For endpoints that have both v6 and v4 addresses we must reserve
6619 * room for the IPv6 header; for those that are only dealing with V4
6620 * we use a larger frag point.
6622 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6623 ovh = SCTP_MED_OVERHEAD;
6625 ovh = SCTP_MED_V4_OVERHEAD;
6628 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6629 siz = asoc->smallest_mtu - ovh;
6631 siz = (stcb->asoc.sctp_frag_point - ovh);
6633 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6635 /* A data chunk MUST fit in a cluster */
6636 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6639 /* adjust for an AUTH chunk if DATA requires auth */
6640 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6641 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6644 /* make it an even word boundary please */
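/*
 * i.e. round siz down to a multiple of 4 (likely something along the
 * lines of siz -= siz % 4) so a DATA chunk payload always ends on a
 * 32-bit boundary.
 */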
6651 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6654 * We assume that the user wants PR_SCTP_TTL if the user
6655 * provides a positive lifetime but does not specify any
6658 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6659 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6660 } else if (sp->timetolive > 0) {
6661 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6662 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6666 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6667 case CHUNK_FLAGS_PR_SCTP_BUF:
6669 * Time to live is a priority stored in tv_sec when
6670 * doing the buffer drop thing.
6672 sp->ts.tv_sec = sp->timetolive;
6675 case CHUNK_FLAGS_PR_SCTP_TTL:
6678 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6679 tv.tv_sec = sp->timetolive / 1000;
6680 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
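/*
 * timetolive is in milliseconds: e.g. timetolive = 1500 gives
 * tv_sec = 1 and tv_usec = 500000, which is then added to the
 * enqueue time already stored in sp->ts.
 */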
6681 /* TODO sctp_constants.h needs alternative time macros when
6682 * _KERNEL is undefined.
6685 timeradd(&sp->ts, &tv, &sp->ts);
6687 timevaladd(&sp->ts, &tv);
6691 case CHUNK_FLAGS_PR_SCTP_RTX:
6693 * Time to live is the number of retransmissions
6696 sp->ts.tv_sec = sp->timetolive;
6700 SCTPDBG(SCTP_DEBUG_USRREQ1,
6701 "Unknown PR_SCTP policy %u.\n",
6702 PR_SCTP_POLICY(sp->sinfo_flags));
6708 sctp_msg_append(struct sctp_tcb *stcb,
6709 struct sctp_nets *net,
6711 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6715 struct sctp_stream_queue_pending *sp = NULL;
6716 struct sctp_stream_out *strm;
6718 /* Given an mbuf chain, put it
6719 * into the association send queue and
6720 * place it on the wheel
6722 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6723 /* Invalid stream number */
6724 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6728 if ((stcb->asoc.stream_locked) &&
6729 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6730 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6734 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6735 /* Now can we send this? */
6736 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6737 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6738 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6739 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6740 /* got data while shutting down */
6741 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6745 sctp_alloc_a_strmoq(stcb, sp);
6747 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6751 sp->sinfo_flags = srcv->sinfo_flags;
6752 sp->timetolive = srcv->sinfo_timetolive;
6753 sp->ppid = srcv->sinfo_ppid;
6754 sp->context = srcv->sinfo_context;
6755 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6757 atomic_add_int(&sp->net->ref_count, 1);
6761 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6762 sp->stream = srcv->sinfo_stream;
6763 sp->msg_is_complete = 1;
6764 sp->sender_all_done = 1;
6767 sp->tail_mbuf = NULL;
6768 sctp_set_prsctp_policy(sp);
6769 /* We could in theory (for sendall) pass the length
6770 * in, but we would still have to hunt through the
6771 * chain since we need to set up the tail_mbuf
6774 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6775 if (SCTP_BUF_NEXT(at) == NULL)
6777 sp->length += SCTP_BUF_LEN(at);
6779 if (srcv->sinfo_keynumber_valid) {
6780 sp->auth_keyid = srcv->sinfo_keynumber;
6782 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6784 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6785 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6786 sp->holds_key_ref = 1;
6788 if (hold_stcb_lock == 0) {
6789 SCTP_TCB_SEND_LOCK(stcb);
6791 sctp_snd_sb_alloc(stcb, sp->length);
6792 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6793 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6794 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6796 if (hold_stcb_lock == 0) {
6797 SCTP_TCB_SEND_UNLOCK(stcb);
6807 static struct mbuf *
6808 sctp_copy_mbufchain(struct mbuf *clonechain,
6809 struct mbuf *outchain,
6810 struct mbuf **endofchain,
6813 uint8_t copy_by_ref)
6816 struct mbuf *appendchain;
6820 if (endofchain == NULL) {
6824 sctp_m_freem(outchain);
6827 if (can_take_mbuf) {
6828 appendchain = clonechain;
6831 #if defined(__Panda__)
6834 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6837 /* It's not in a cluster */
6838 if (*endofchain == NULL) {
6839 /* let's get an mbuf cluster */
6840 if (outchain == NULL) {
6841 /* This is the general case */
6843 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6844 if (outchain == NULL) {
6847 SCTP_BUF_LEN(outchain) = 0;
6848 *endofchain = outchain;
6849 /* get the prepend space */
6850 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
6852 /* We really should not get a NULL in endofchain */
6856 if (SCTP_BUF_NEXT(m) == NULL) {
6860 m = SCTP_BUF_NEXT(m);
6863 if (*endofchain == NULL) {
6864 /* huh, TSNH XXX maybe we should panic */
6865 sctp_m_freem(outchain);
6869 /* get the space left at the new end */
6870 len = M_TRAILINGSPACE(*endofchain);
6872 /* how much is left at the end? */
6873 len = M_TRAILINGSPACE(*endofchain);
6875 /* Find the end of the data, for appending */
6876 cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
6878 /* Now let's copy it out */
6879 if (len >= sizeofcpy) {
6880 /* It all fits, copy it in */
6881 m_copydata(clonechain, 0, sizeofcpy, cp);
6882 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6884 /* fill up the end of the chain */
6886 m_copydata(clonechain, 0, len, cp);
6887 SCTP_BUF_LEN((*endofchain)) += len;
6888 /* now we need another one */
6891 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6896 SCTP_BUF_NEXT((*endofchain)) = m;
6898 cp = mtod((*endofchain), caddr_t);
6899 m_copydata(clonechain, len, sizeofcpy, cp);
6900 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6904 /* copy the old-fashioned way */
6905 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6906 #ifdef SCTP_MBUF_LOGGING
6907 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6910 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6911 if (SCTP_BUF_IS_EXTENDED(mat)) {
6912 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6919 if (appendchain == NULL) {
6922 sctp_m_freem(outchain);
6926 /* tack on to the end */
6927 if (*endofchain != NULL) {
6928 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6932 if (SCTP_BUF_NEXT(m) == NULL) {
6933 SCTP_BUF_NEXT(m) = appendchain;
6936 m = SCTP_BUF_NEXT(m);
6940 * save off the end and update the end-chain
6945 if (SCTP_BUF_NEXT(m) == NULL) {
6949 m = SCTP_BUF_NEXT(m);
6953 /* save off the end and update the end-chain position */
6956 if (SCTP_BUF_NEXT(m) == NULL) {
6960 m = SCTP_BUF_NEXT(m);
6962 return (appendchain);
6967 sctp_med_chunk_output(struct sctp_inpcb *inp,
6968 struct sctp_tcb *stcb,
6969 struct sctp_association *asoc,
6972 int control_only, int from_where,
6973 struct timeval *now, int *now_filled, int frag_point, int so_locked
6974 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6980 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6981 uint32_t val SCTP_UNUSED)
6983 struct sctp_copy_all *ca;
6986 int added_control = 0;
6987 int un_sent, do_chunk_output = 1;
6988 struct sctp_association *asoc;
6989 struct sctp_nets *net;
6991 ca = (struct sctp_copy_all *)ptr;
6992 if (ca->m == NULL) {
6995 if (ca->inp != inp) {
6999 if (ca->sndlen > 0) {
7000 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7002 /* can't copy so we are done */
7006 #ifdef SCTP_MBUF_LOGGING
7007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7010 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7011 if (SCTP_BUF_IS_EXTENDED(mat)) {
7012 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7020 SCTP_TCB_LOCK_ASSERT(stcb);
7021 if (stcb->asoc.alternate) {
7022 net = stcb->asoc.alternate;
7024 net = stcb->asoc.primary_destination;
7026 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7027 /* Abort this assoc with m as the user defined reason */
7029 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7031 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
7032 0, M_NOWAIT, 1, MT_DATA);
7033 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
7036 struct sctp_paramhdr *ph;
7038 ph = mtod(m, struct sctp_paramhdr *);
7039 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7040 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
7042 /* We add one here to keep the assoc from
7043 * disappearing on us.
7045 atomic_add_int(&stcb->asoc.refcnt, 1);
7046 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
7047 /* sctp_abort_an_association calls sctp_free_asoc();
7048 * free_asoc will NOT free the association since we
7049 * incremented the refcnt. We do this to prevent
7050 * it being freed and things getting tricky, since
7051 * we could end up (from free_asoc) calling inpcb_free
7052 * which would get a recursive lock call to the
7053 * iterator lock. But as a consequence of that the
7054 * stcb will return to us unlocked. Since free_asoc
7055 * returns with either no TCB or the TCB unlocked, we
7056 * must relock to unlock in the iterator timer :-0
7058 SCTP_TCB_LOCK(stcb);
7059 atomic_add_int(&stcb->asoc.refcnt, -1);
7060 goto no_chunk_output;
7063 ret = sctp_msg_append(stcb, net, m,
7067 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7068 /* shutdown this assoc */
7070 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
7072 if (TAILQ_EMPTY(&asoc->send_queue) &&
7073 TAILQ_EMPTY(&asoc->sent_queue) &&
7075 if (asoc->locked_on_sending) {
7078 /* there is nothing queued to send, so I'm done... */
7079 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
7080 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7081 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7082 /* only send SHUTDOWN the first time through */
7083 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
7084 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7086 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
7087 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7088 sctp_stop_timers_for_shutdown(stcb);
7089 sctp_send_shutdown(stcb, net);
7090 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7092 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7093 asoc->primary_destination);
7095 do_chunk_output = 0;
7099 * we still got (or just got) data to send, so set
7103 * XXX sockets draft says that SCTP_EOF should be
7104 * sent with no data. currently, we will allow user
7105 * data to be sent first and move to
7108 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
7109 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7110 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7111 if (asoc->locked_on_sending) {
7112 /* Locked to send out the data */
7113 struct sctp_stream_queue_pending *sp;
7114 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
7116 if ((sp->length == 0) && (sp->msg_is_complete == 0))
7117 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7120 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
7121 if (TAILQ_EMPTY(&asoc->send_queue) &&
7122 TAILQ_EMPTY(&asoc->sent_queue) &&
7123 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7125 atomic_add_int(&stcb->asoc.refcnt, 1);
7126 sctp_abort_an_association(stcb->sctp_ep, stcb,
7127 NULL, SCTP_SO_NOT_LOCKED);
7128 atomic_add_int(&stcb->asoc.refcnt, -1);
7129 goto no_chunk_output;
7131 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7132 asoc->primary_destination);
7138 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7139 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
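/*
 * Nagle-style check: with SCTP_NODELAY off, data still in flight and
 * less than an MTU's worth queued (un_sent), hold back and let it
 * bundle on a later send opportunity.
 */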
7141 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7142 (stcb->asoc.total_flight > 0) &&
7143 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7144 do_chunk_output = 0;
7146 if (do_chunk_output)
7147 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7148 else if (added_control) {
7149 int num_out = 0, reason = 0, now_filled = 0;
7152 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7153 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7154 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7165 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7167 struct sctp_copy_all *ca;
7169 ca = (struct sctp_copy_all *)ptr;
7171 * Do a notify here? Kacheong suggests that the notify be done at
7172 * the send time.. so you would push up a notification if any send
7173 * failed. Don't know if this is feasible since the only failures we
7174 * have are "memory" related, and if you cannot get an mbuf to send
7175 * the data you surely can't get an mbuf to send up to notify the
7176 * user you can't send the data :->
7179 /* now free everything */
7180 sctp_m_freem(ca->m);
7181 SCTP_FREE(ca, SCTP_M_COPYAL);
7185 #define MC_ALIGN(m, len) do { \
7186 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
7191 static struct mbuf *
7192 sctp_copy_out_all(struct uio *uio, int len)
7194 struct mbuf *ret, *at;
7195 int left, willcpy, cancpy, error;
7197 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7203 SCTP_BUF_LEN(ret) = 0;
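/*
 * Fill cluster mbufs from the uio one at a time: cancpy is the
 * trailing space in the current mbuf, willcpy how much to move this
 * pass, and left the bytes still to be copied.
 */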
7204 /* save space for the data chunk header */
7205 cancpy = M_TRAILINGSPACE(ret);
7206 willcpy = min(cancpy, left);
7209 /* Align data to the end */
7210 error = uiomove(mtod(at, caddr_t), willcpy, uio);
7216 SCTP_BUF_LEN(at) = willcpy;
7217 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7220 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
7221 if (SCTP_BUF_NEXT(at) == NULL) {
7224 at = SCTP_BUF_NEXT(at);
7225 SCTP_BUF_LEN(at) = 0;
7226 cancpy = M_TRAILINGSPACE(at);
7227 willcpy = min(cancpy, left);
7234 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7235 struct sctp_sndrcvinfo *srcv)
7238 struct sctp_copy_all *ca;
7240 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7244 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7247 memset(ca, 0, sizeof(struct sctp_copy_all));
7251 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7254 * take off the sendall flag, it would be bad if we failed to do
7257 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7258 /* get length and mbuf chain */
7260 #if defined(__APPLE__)
7261 #if defined(APPLE_LEOPARD)
7262 ca->sndlen = uio->uio_resid;
7264 ca->sndlen = uio_resid(uio);
7267 ca->sndlen = uio->uio_resid;
7269 #if defined(__APPLE__)
7270 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7272 ca->m = sctp_copy_out_all(uio, ca->sndlen);
7273 #if defined(__APPLE__)
7274 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7276 if (ca->m == NULL) {
7277 SCTP_FREE(ca, SCTP_M_COPYAL);
7278 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7282 /* Gather the length of the send */
7286 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7287 ca->sndlen += SCTP_BUF_LEN(mat);
7290 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7291 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7292 SCTP_ASOC_ANY_STATE,
7294 sctp_sendall_completes, inp, 1);
7296 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
7297 SCTP_FREE(ca, SCTP_M_COPYAL);
7298 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7306 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7308 struct sctp_tmit_chunk *chk, *nchk;
7310 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7311 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7312 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7314 sctp_m_freem(chk->data);
7317 asoc->ctrl_queue_cnt--;
7318 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7324 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7326 struct sctp_association *asoc;
7327 struct sctp_tmit_chunk *chk, *nchk;
7328 struct sctp_asconf_chunk *acp;
7331 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7332 /* find SCTP_ASCONF chunk in queue */
7333 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7335 acp = mtod(chk->data, struct sctp_asconf_chunk *);
7336 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7341 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7343 sctp_m_freem(chk->data);
7346 asoc->ctrl_queue_cnt--;
7347 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7354 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7355 struct sctp_association *asoc,
7356 struct sctp_tmit_chunk **data_list,
7358 struct sctp_nets *net)
7361 struct sctp_tmit_chunk *tp1;
7363 for (i = 0; i < bundle_at; i++) {
7364 /* off of the send queue */
7365 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7366 asoc->send_queue_cnt--;
7369 * Any chunk NOT 0 you zap the time; chunk 0 gets
7370 * zapped or set based on whether an RTO measurement is
7373 data_list[i]->do_rtt = 0;
7376 data_list[i]->sent_rcv_time = net->last_sent_time;
7377 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7378 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
7379 if (data_list[i]->whoTo == NULL) {
7380 data_list[i]->whoTo = net;
7381 atomic_add_int(&net->ref_count, 1);
7383 /* on to the sent queue */
7384 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7385 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7386 struct sctp_tmit_chunk *tpp;
7388 /* need to move back */
7390 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7392 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7396 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7399 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7401 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7406 /* This does not lower until the cum-ack passes it */
7407 asoc->sent_queue_cnt++;
7408 if ((asoc->peers_rwnd <= 0) &&
7409 (asoc->total_flight == 0) &&
7411 /* Mark the chunk as being a window probe */
7412 SCTP_STAT_INCR(sctps_windowprobed);
7414 #ifdef SCTP_AUDITING_ENABLED
7415 sctp_audit_log(0xC2, 3);
7417 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7418 data_list[i]->snd_count = 1;
7419 data_list[i]->rec.data.chunk_was_revoked = 0;
7420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7421 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7422 data_list[i]->whoTo->flight_size,
7423 data_list[i]->book_size,
7424 (uintptr_t)data_list[i]->whoTo,
7425 data_list[i]->rec.data.TSN_seq);
7427 sctp_flight_size_increase(data_list[i]);
7428 sctp_total_flight_increase(stcb, data_list[i]);
7429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7430 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7431 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7433 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7434 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
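/*
 * Charge the peer's advertised window for the chunk plus the
 * configurable per-chunk overhead allowance (sctp_peer_chunk_oh); if
 * the result falls below the sender-side SWS threshold it is forced
 * to zero just below.
 */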
7435 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7436 /* SWS sender side engages */
7437 asoc->peers_rwnd = 0;
7440 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7441 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7446 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7447 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7452 struct sctp_tmit_chunk *chk, *nchk;
7454 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7455 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7456 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7457 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7458 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7459 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7460 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7461 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7462 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7463 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7464 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7465 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7466 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7467 /* Stray chunks must be cleaned up */
7469 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7471 sctp_m_freem(chk->data);
7474 asoc->ctrl_queue_cnt--;
7475 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7476 asoc->fwd_tsn_cnt--;
7477 sctp_free_a_chunk(stcb, chk, so_locked);
7478 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7479 /* special handling, we must look into the param */
7480 if (chk != asoc->str_reset) {
7481 goto clean_up_anyway;
7489 sctp_can_we_split_this(struct sctp_tcb *stcb,
7491 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7493 /* Make a decision on whether I should split a
7494 * msg into multiple parts. This is only asked of
7495 * incomplete messages.
7498 /* If we are doing EEOR we need to always send
7499 * it if it's the entire thing, since it might
7500 * be all the guy is putting in the hopper.
7502 if (goal_mtu >= length) {
7504 * If we have data outstanding,
7505 * we get another chance when the sack
7506 * arrives to transmit - wait for more data
7508 if (stcb->asoc.total_flight == 0) {
7509 /* If nothing is in flight, we zero
7510 * the packet counter.
7517 /* You can fill the rest */
7522 * For those strange folk that make the send buffer
7523 * smaller than our fragmentation point, we can't
7524 * get a full msg in so we have to allow splitting.
7526 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7530 if ((length <= goal_mtu) ||
7531 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7532 /* Sub-optimal residual, don't split in non-EEOR mode. */
7535 /* If we reach here, length is larger
7536 * than the goal_mtu. Do we wish to split
7537 * it for the sake of putting packets together?
7539 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7540 /* It's ok to split it */
7541 return (min(goal_mtu, frag_point));
7543 /* Nope, can't split */
7549 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7550 struct sctp_stream_out *strq,
7552 uint32_t frag_point,
7558 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7563 /* Move from the stream to the send_queue keeping track of the total */
7564 struct sctp_association *asoc;
7565 struct sctp_stream_queue_pending *sp;
7566 struct sctp_tmit_chunk *chk;
7567 struct sctp_data_chunk *dchkh;
7568 uint32_t to_move, length;
7569 uint8_t rcv_flags = 0;
7571 uint8_t send_lock_up = 0;
7573 SCTP_TCB_LOCK_ASSERT(stcb);
7576 /*sa_ignore FREED_MEMORY*/
7577 sp = TAILQ_FIRST(&strq->outqueue);
7580 if (send_lock_up == 0) {
7581 SCTP_TCB_SEND_LOCK(stcb);
7584 sp = TAILQ_FIRST(&strq->outqueue);
7588 if (strq->last_msg_incomplete) {
7589 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7591 strq->last_msg_incomplete);
7592 strq->last_msg_incomplete = 0;
7596 SCTP_TCB_SEND_UNLOCK(stcb);
7601 if ((sp->msg_is_complete) && (sp->length == 0)) {
7602 if (sp->sender_all_done) {
7603 /* We are doing deferred cleanup. Last
7604 * time through when we took all the data
7605 * the sender_all_done was not set.
7607 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7608 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7609 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7610 sp->sender_all_done,
7612 sp->msg_is_complete,
7616 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7617 SCTP_TCB_SEND_LOCK(stcb);
7620 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7621 TAILQ_REMOVE(&strq->outqueue, sp, next);
7622 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7624 sctp_free_remote_addr(sp->net);
7628 sctp_m_freem(sp->data);
7631 sctp_free_a_strmoq(stcb, sp, so_locked);
7632 /* we can't be locked to it */
7634 stcb->asoc.locked_on_sending = NULL;
7636 SCTP_TCB_SEND_UNLOCK(stcb);
7639 /* back to get the next msg */
7642 /* sender just finished this but
7643 * still holds a reference
7651 /* is there some to get */
7652 if (sp->length == 0) {
7658 } else if (sp->discard_rest) {
7659 if (send_lock_up == 0) {
7660 SCTP_TCB_SEND_LOCK(stcb);
7663 /* Whack down the size */
7664 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7665 if ((stcb->sctp_socket != NULL) &&
7666 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7667 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7668 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7671 sctp_m_freem(sp->data);
7673 sp->tail_mbuf = NULL;
7683 some_taken = sp->some_taken;
7684 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7685 sp->msg_is_complete = 1;
7688 length = sp->length;
7689 if (sp->msg_is_complete) {
7690 /* The message is complete */
7691 to_move = min(length, frag_point);
7692 if (to_move == length) {
7693 /* All of it fits in the MTU */
7694 if (sp->some_taken) {
7695 rcv_flags |= SCTP_DATA_LAST_FRAG;
7696 sp->put_last_out = 1;
7698 rcv_flags |= SCTP_DATA_NOT_FRAG;
7699 sp->put_last_out = 1;
7702 /* Not all of it fits, we fragment */
7703 if (sp->some_taken == 0) {
7704 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7709 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7712 * We use a snapshot of length in case it
7713 * is expanding during the compare.
7718 if (to_move >= llen) {
7720 if (send_lock_up == 0) {
7722 * We are taking all of an incomplete msg
7723 * thus we need a send lock.
7725 SCTP_TCB_SEND_LOCK(stcb);
7727 if (sp->msg_is_complete) {
7728 /* the sender finished the msg */
7733 if (sp->some_taken == 0) {
7734 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7738 /* Nothing to take. */
7739 if (sp->some_taken) {
7748 /* If we reach here, we can copy out a chunk */
7749 sctp_alloc_a_chunk(stcb, chk);
7751 /* No chunk memory */
7756 /* Set up for unordered if needed by looking
7757 * at the user-sent info flags.
7759 if (sp->sinfo_flags & SCTP_UNORDERED) {
7760 rcv_flags |= SCTP_DATA_UNORDERED;
7762 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7763 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7764 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7766 /* clear out the chunk before setting up */
7767 memset(chk, 0, sizeof(*chk));
7768 chk->rec.data.rcv_flags = rcv_flags;
7770 if (to_move >= length) {
7771 /* we think we can steal the whole thing */
7772 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7773 SCTP_TCB_SEND_LOCK(stcb);
7776 if (to_move < sp->length) {
7777 /* bail, it changed */
7780 chk->data = sp->data;
7781 chk->last_mbuf = sp->tail_mbuf;
7782 /* register the stealing */
7783 sp->data = sp->tail_mbuf = NULL;
7787 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7788 chk->last_mbuf = NULL;
7789 if (chk->data == NULL) {
7790 sp->some_taken = some_taken;
7791 sctp_free_a_chunk(stcb, chk, so_locked);
7796 #ifdef SCTP_MBUF_LOGGING
7797 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7800 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7801 if (SCTP_BUF_IS_EXTENDED(mat)) {
7802 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7807 /* Pull off the data */
7808 m_adj(sp->data, to_move);
7809 /* Now let's work our way down and compact it */
7811 while (m && (SCTP_BUF_LEN(m) == 0)) {
7812 sp->data = SCTP_BUF_NEXT(m);
7813 SCTP_BUF_NEXT(m) = NULL;
7814 if (sp->tail_mbuf == m) {
7816 * Freeing tail? TSNH since
7817 * we supposedly were taking less
7818 * than the sp->length.
7821 panic("Huh, freing tail? - TSNH");
7823 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7824 sp->tail_mbuf = sp->data = NULL;
7833 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7834 chk->copy_by_ref = 1;
7836 chk->copy_by_ref = 0;
7838 /* get last_mbuf and counts of mbuf usage.
7839 * This is ugly but hopefully it's only one mbuf.
7841 if (chk->last_mbuf == NULL) {
7842 chk->last_mbuf = chk->data;
7843 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7844 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7848 if (to_move > length) {
7849 /*- This should not happen either,
7850 * since we always lower to_move to the size
7851 * of sp->length if it's larger.
7854 panic("Huh, how can to_move be larger?");
7856 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7860 atomic_subtract_int(&sp->length, to_move);
7862 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7863 /* Not enough room for a chunk header, get some */
7865 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7868 * we're in trouble here. _PREPEND below will free
7869 * all the data if there is no leading space, so we
7870 * must put the data back and restore.
7872 if (send_lock_up == 0) {
7873 SCTP_TCB_SEND_LOCK(stcb);
7876 if (chk->data == NULL) {
7877 /* unsteal the data */
7878 sp->data = chk->data;
7879 sp->tail_mbuf = chk->last_mbuf;
7882 /* reassemble the data */
7884 sp->data = chk->data;
7885 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7887 sp->some_taken = some_taken;
7888 atomic_add_int(&sp->length, to_move);
7891 sctp_free_a_chunk(stcb, chk, so_locked);
7895 SCTP_BUF_LEN(m) = 0;
7896 SCTP_BUF_NEXT(m) = chk->data;
7898 M_ALIGN(chk->data, 4);
7901 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7902 if (chk->data == NULL) {
7903 /* HELP, TSNH since we assured it would not above? */
7905 panic("prepend failes HELP?");
7907 SCTP_PRINTF("prepend fails HELP?\n");
7908 sctp_free_a_chunk(stcb, chk, so_locked);
7914 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7915 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7916 chk->book_size_scale = 0;
7917 chk->sent = SCTP_DATAGRAM_UNSENT;
7920 chk->asoc = &stcb->asoc;
7921 chk->pad_inplace = 0;
7922 chk->no_fr_allowed = 0;
7923 chk->rec.data.stream_seq = strq->next_sequence_send;
7924 if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
7925 !(rcv_flags & SCTP_DATA_UNORDERED)) {
7926 strq->next_sequence_send++;
7928 chk->rec.data.stream_number = sp->stream;
7929 chk->rec.data.payloadtype = sp->ppid;
7930 chk->rec.data.context = sp->context;
7931 chk->rec.data.doing_fast_retransmit = 0;
7933 chk->rec.data.timetodrop = sp->ts;
7934 chk->flags = sp->act_flags;
7937 chk->whoTo = sp->net;
7938 atomic_add_int(&chk->whoTo->ref_count, 1);
7942 if (sp->holds_key_ref) {
7943 chk->auth_keyid = sp->auth_keyid;
7944 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7945 chk->holds_key_ref = 1;
7948 #if defined(__FreeBSD__) || defined(__Panda__)
7949 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7951 chk->rec.data.TSN_seq = asoc->sending_seq++;
7953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7954 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7955 (uintptr_t)stcb, sp->length,
7956 (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7957 chk->rec.data.TSN_seq);
7959 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7961 * Put the rest of the things in place now. Size was done
7962 * earlier in previous loop prior to padding.
7965 #ifdef SCTP_ASOCLOG_OF_TSNS
7966 SCTP_TCB_LOCK_ASSERT(stcb);
7967 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7968 asoc->tsn_out_at = 0;
7969 asoc->tsn_out_wrapped = 1;
7971 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7972 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7973 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7974 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7975 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7976 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7977 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7978 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7982 dchkh->ch.chunk_type = SCTP_DATA;
7983 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7984 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7985 dchkh->dp.stream_id = htons(strq->stream_no);
7986 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7987 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7988 dchkh->ch.chunk_length = htons(chk->send_size);
7989 /* Now advance the chk->send_size by the actual pad needed. */
7990 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7995 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
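/*
 * SCTP_SIZE32() rounds up to the next multiple of 4, so e.g. a
 * 30-byte chunk needs pads = 2 to land on a 32-bit boundary.
 */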
7996 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7997 chk->pad_inplace = 1;
7999 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
8000 /* pad added an mbuf */
8001 chk->last_mbuf = lm;
8003 chk->send_size += pads;
8005 if (PR_SCTP_ENABLED(chk->flags)) {
8006 asoc->pr_sctp_cnt++;
8008 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
8009 /* All done, pull and kill the message */
8010 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
8011 if (sp->put_last_out == 0) {
8012 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
8013 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
8014 sp->sender_all_done,
8016 sp->msg_is_complete,
8020 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
8021 SCTP_TCB_SEND_LOCK(stcb);
8024 TAILQ_REMOVE(&strq->outqueue, sp, next);
8025 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
8027 sctp_free_remote_addr(sp->net);
8031 sctp_m_freem(sp->data);
8034 sctp_free_a_strmoq(stcb, sp, so_locked);
8036 /* we can't be locked to it */
8038 stcb->asoc.locked_on_sending = NULL;
8040 /* more to go, we are locked */
8043 asoc->chunks_on_out_queue++;
8044 strq->chunks_on_queues++;
8045 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8046 asoc->send_queue_cnt++;
8049 SCTP_TCB_SEND_UNLOCK(stcb);
8056 sctp_fill_outqueue(struct sctp_tcb *stcb,
8057 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
8058 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8063 struct sctp_association *asoc;
8064 struct sctp_stream_out *strq;
8065 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
8068 SCTP_TCB_LOCK_ASSERT(stcb);
8070 switch (net->ro._l_addr.sa.sa_family) {
8073 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8078 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
8081 #if defined(__Userspace__)
8083 goal_mtu = net->mtu - sizeof(struct sctphdr);
8088 goal_mtu = net->mtu;
8091 /* Need an allowance for the data chunk header too */
8092 goal_mtu -= sizeof(struct sctp_data_chunk);
8094 /* must make even word boundary */
8095 goal_mtu &= 0xfffffffc;
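/*
 * Masking with 0xfffffffc rounds the per-packet data budget down to a
 * multiple of 4 so every DATA chunk stays 32-bit aligned.
 */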
8096 if (asoc->locked_on_sending) {
8097 /* We are stuck on one stream until the message completes. */
8098 strq = asoc->locked_on_sending;
8101 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8104 while ((goal_mtu > 0) && strq) {
8107 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
8108 &giveup, eeor_mode, &bail, so_locked);
8110 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
8113 asoc->locked_on_sending = strq;
8114 if ((moved_how_much == 0) || (giveup) || bail)
8115 /* no more to move for now */
8118 asoc->locked_on_sending = NULL;
8119 if ((giveup) || bail) {
8122 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8127 total_moved += moved_how_much;
8128 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
8129 goal_mtu &= 0xfffffffc;
8134 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8136 if (total_moved == 0) {
8137 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8138 (net == stcb->asoc.primary_destination)) {
8139 /* ran dry for primary network net */
8140 SCTP_STAT_INCR(sctps_primary_randry);
8141 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
8142 /* ran dry with CMT on */
8143 SCTP_STAT_INCR(sctps_cmt_randry);
8149 sctp_fix_ecn_echo(struct sctp_association *asoc)
8151 struct sctp_tmit_chunk *chk;
8153 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8154 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8155 chk->sent = SCTP_DATAGRAM_UNSENT;
8161 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8163 struct sctp_association *asoc;
8164 struct sctp_tmit_chunk *chk;
8165 struct sctp_stream_queue_pending *sp;
8172 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8173 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8174 if (sp->net == net) {
8175 sctp_free_remote_addr(sp->net);
8180 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8181 if (chk->whoTo == net) {
8182 sctp_free_remote_addr(chk->whoTo);
8189 sctp_med_chunk_output(struct sctp_inpcb *inp,
8190 struct sctp_tcb *stcb,
8191 struct sctp_association *asoc,
8194 int control_only, int from_where,
8195 struct timeval *now, int *now_filled, int frag_point, int so_locked
8196 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8202 * Ok this is the generic chunk service queue. We must do the
8203 * following: - Service the stream queue that is next, moving any
8204 * message (note I must get a complete message i.e. FIRST/MIDDLE and
8205 * LAST to the out queue in one pass) and assigning TSNs. - Check to
8206 * see if the cwnd/rwnd allows any output; if so we go ahead and
8207 * formulate and send the low-level chunks, making sure to combine
8208 * any control in the control chunk queue also.
8210 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8211 struct mbuf *outchain, *endoutchain;
8212 struct sctp_tmit_chunk *chk, *nchk;
8214 /* temp arrays for unlinking */
8215 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8216 int no_fragmentflg, error;
8217 unsigned int max_rwnd_per_dest, max_send_per_dest;
8218 int one_chunk, hbflag, skip_data_for_this_net;
8219 int asconf, cookie, no_out_cnt;
8220 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8221 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8223 uint32_t auth_offset = 0;
8224 struct sctp_auth_chunk *auth = NULL;
8225 uint16_t auth_keyid;
8226 int override_ok = 1;
8227 int skip_fill_up = 0;
8228 int data_auth_reqd = 0;
8229 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8233 #if defined(__APPLE__)
8235 sctp_lock_assert(SCTP_INP_SO(inp));
8237 sctp_unlock_assert(SCTP_INP_SO(inp));
8241 auth_keyid = stcb->asoc.authinfo.active_keyid;
8243 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8244 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
8245 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8250 ctl_cnt = no_out_cnt = asconf = cookie = 0;
8252 * First let's prime the pump. For each destination, if there is room
8253 * in the flight size, attempt to pull an MTU's worth out of the
8254 * stream queues into the general send_queue.
8256 #ifdef SCTP_AUDITING_ENABLED
8257 sctp_audit_log(0xC2, 2);
8259 SCTP_TCB_LOCK_ASSERT(stcb);
8261 if ((control_only) || (asoc->stream_reset_outstanding))
8266 /* Nothing possible to send? */
8267 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8268 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8269 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8270 TAILQ_EMPTY(&asoc->send_queue) &&
8271 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
8276 if (asoc->peers_rwnd == 0) {
8277 /* No room in peers rwnd */
8279 if (asoc->total_flight > 0) {
8280 /* we are allowed one chunk in flight */
8284 if (stcb->asoc.ecn_echo_cnt_onq) {
8285 /* Record where a sack goes, if any */
8286 if (no_data_chunks &&
8287 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8288 /* Nothing but ECNe to send - we don't do that */
8289 goto nothing_to_send;
8291 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8292 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8293 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8294 sack_goes_to = chk->whoTo;
8299 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8300 if (stcb->sctp_socket)
8301 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8303 max_send_per_dest = 0;
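/*
 * Rough worked example (added, illustrative numbers only): with
 * peers_rwnd = 48000, total_flight = 16000 and numnets = 2, each
 * destination may hold (48000 + 16000) / 2 = 32000 bytes of the shared
 * receive window; with a 131072-byte send socket buffer the per-net
 * send-buffer share would be 131072 / 2 = 65536 bytes. These caps are
 * only consulted below when CMT and buffer splitting are enabled.
 */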
8304 if (no_data_chunks == 0) {
8305 /* How many non-directed chunks are there? */
8306 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8307 if (chk->whoTo == NULL) {
8308 /* We already have non-directed
8309 * chunks on the queue, no need
8318 if ((no_data_chunks == 0) &&
8319 (skip_fill_up == 0) &&
8320 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8321 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8323 * This for loop we are in takes in
8324 * each net; if it's got space in cwnd and
8325 * has data sent to it (when CMT is off) then it
8326 * calls sctp_fill_outqueue for the net. This gets
8327 * data on the send queue for that network.
8329 * In sctp_fill_outqueue TSN's are assigned and
8330 * data is copied out of the stream buffers. Note
8331 * mostly copy by reference (we hope).
8333 net->window_probe = 0;
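/*
 * Added summary of the gating below: a net is skipped for filling if it
 * is not the alternate and is in PF, unreachable or unconfirmed state,
 * or if its flight_size already fills its cwnd; otherwise
 * sctp_fill_outqueue() pulls roughly an MTU's worth of stream-queue data
 * onto the send_queue for that destination.
 */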
8334 if ((net != stcb->asoc.alternate) &&
8335 ((net->dest_state & SCTP_ADDR_PF) ||
8336 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8337 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8339 sctp_log_cwnd(stcb, net, 1,
8340 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8344 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8345 (net->flight_size == 0)) {
8346 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8348 if (net->flight_size >= net->cwnd) {
8349 /* skip this network, no room - can't fill */
8350 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8351 sctp_log_cwnd(stcb, net, 3,
8352 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8357 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8359 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8361 /* memory alloc failure */
8367 /* now service each destination and send out what we can for it */
8368 /* Nothing to send? */
8369 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8370 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8371 TAILQ_EMPTY(&asoc->send_queue)) {
8376 if (asoc->sctp_cmt_on_off > 0) {
8377 /* get the last start point */
8378 start_at = asoc->last_net_cmt_send_started;
8379 if (start_at == NULL) {
8380 /* NULL, so start at the beginning */
8381 start_at = TAILQ_FIRST(&asoc->nets);
8383 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8384 if (start_at == NULL) {
8385 start_at = TAILQ_FIRST(&asoc->nets);
8388 asoc->last_net_cmt_send_started = start_at;
8390 start_at = TAILQ_FIRST(&asoc->nets);
8392 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8393 if (chk->whoTo == NULL) {
8394 if (asoc->alternate) {
8395 chk->whoTo = asoc->alternate;
8397 chk->whoTo = asoc->primary_destination;
8399 atomic_add_int(&chk->whoTo->ref_count, 1);
8402 old_start_at = NULL;
8403 again_one_more_time:
8404 for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8405 /* how much can we send? */
8406 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8407 if (old_start_at && (old_start_at == net)) {
8408 /* went through the list completely. */
8412 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8413 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8414 (net->flight_size >= net->cwnd)) {
8415 /* Nothing on control or asconf and flight is full, we can skip
8416 * even in the CMT case.
8421 endoutchain = outchain = NULL;
8424 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8425 skip_data_for_this_net = 1;
8427 skip_data_for_this_net = 0;
8429 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) || defined(__APPLE__))
8430 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
8432 * if we have a route and an ifp check to see if we
8433 * have room to send to this guy
8437 ifp = net->ro.ro_rt->rt_ifp;
8438 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
8439 SCTP_STAT_INCR(sctps_ifnomemqueued);
8440 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
8441 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
8447 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8450 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8455 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8458 #if defined(__Userspace__)
8460 mtu = net->mtu - sizeof(struct sctphdr);
8470 if (mtu > asoc->peers_rwnd) {
8471 if (asoc->total_flight > 0) {
8472 /* We have a packet in flight somewhere */
8473 r_mtu = asoc->peers_rwnd;
8475 /* We are always allowed to send one MTU out */
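/*
 * Added worked example (typical sizes, no IP options assumed): for an
 * IPv4 destination with net->mtu = 1500 the usable SCTP payload is
 * 1500 - 20 (IP) - 12 (SCTP common header) = 1468 bytes; for IPv6 it is
 * 1500 - 40 - 12 = 1448 bytes. r_mtu additionally caps DATA by the
 * peer's remaining rwnd, except that one full MTU may always be sent
 * when nothing is in flight.
 */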
8482 /************************/
8483 /* ASCONF transmission */
8484 /************************/
8485 /* Now first lets go through the asconf queue */
8486 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8487 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8490 if (chk->whoTo == NULL) {
8491 if (asoc->alternate == NULL) {
8492 if (asoc->primary_destination != net) {
8496 if (asoc->alternate != net) {
8501 if (chk->whoTo != net) {
8505 if (chk->data == NULL) {
8508 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8509 chk->sent != SCTP_DATAGRAM_RESEND) {
8513 * if no AUTH is yet included and this chunk
8514 * requires it, make sure to account for it. We
8515 * don't apply the size until the AUTH chunk is
8516 * actually added below in case there is no room for
8517 * this chunk. NOTE: we overload the use of "omtu"
8520 if ((auth == NULL) &&
8521 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8522 stcb->asoc.peer_auth_chunks)) {
8523 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
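/*
 * Added note: "omtu" is borrowed here to pre-charge the AUTH chunk that
 * would have to precede this ASCONF if the peer requires authentication;
 * the AUTH chunk itself is only built further down, once the chunk
 * really fits. As a rough illustration, with HMAC-SHA-1 that pre-charge
 * is on the order of 28 bytes (8-byte AUTH header plus a 20-byte digest).
 */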
8526 /* Here we do NOT factor the r_mtu */
8527 if ((chk->send_size < (int)(mtu - omtu)) ||
8528 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8530 * We probably should glom the mbuf chain
8531 * from the chk->data for control but the
8532 * problem is it becomes yet one more level
8533 * of tracking to do if for some reason
8534 * output fails. Then I have got to
8535 * reconstruct the merged control chain.. el
8536 * yucko.. for now we take the easy way and
8540 * Add an AUTH chunk, if chunk requires it
8541 * save the offset into the chain for AUTH
8543 if ((auth == NULL) &&
8544 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8545 stcb->asoc.peer_auth_chunks))) {
8546 outchain = sctp_add_auth_chunk(outchain,
8551 chk->rec.chunk_id.id);
8552 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8554 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8555 (int)chk->rec.chunk_id.can_take_data,
8556 chk->send_size, chk->copy_by_ref);
8557 if (outchain == NULL) {
8559 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8562 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8563 /* update our MTU size */
8564 if (mtu > (chk->send_size + omtu))
8565 mtu -= (chk->send_size + omtu);
8568 to_out += (chk->send_size + omtu);
8569 /* Do clear IP_DF ? */
8570 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8573 if (chk->rec.chunk_id.can_take_data)
8576 * set hb flag since we can
8582 * should sysctl this: don't
8583 * bundle data with ASCONF
8584 * since it requires AUTH
8587 chk->sent = SCTP_DATAGRAM_SENT;
8588 if (chk->whoTo == NULL) {
8590 atomic_add_int(&net->ref_count, 1);
8595 * Ok we are out of room but we can
8596 * output without affecting the
8597 * flight size since this little guy
8598 * is a control only packet.
8600 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8602 * do NOT clear the asconf
8603 * flag as it is used to do
8604 * appropriate source address
8607 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8608 (struct sockaddr *)&net->ro._l_addr,
8609 outchain, auth_offset, auth,
8610 stcb->asoc.authinfo.active_keyid,
8611 no_fragmentflg, 0, asconf,
8612 inp->sctp_lport, stcb->rport,
8613 htonl(stcb->asoc.peer_vtag),
8615 #if defined(__FreeBSD__)
8619 if (error == ENOBUFS) {
8620 asoc->ifp_had_enobuf = 1;
8621 SCTP_STAT_INCR(sctps_lowlevelerr);
8623 if (from_where == 0) {
8624 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8626 if (*now_filled == 0) {
8627 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8629 *now = net->last_sent_time;
8631 net->last_sent_time = *now;
8634 /* error, could not output */
8635 if (error == EHOSTUNREACH) {
8641 sctp_move_chunks_from_net(stcb, net);
8646 asoc->ifp_had_enobuf = 0;
8647 if (*now_filled == 0) {
8648 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8650 *now = net->last_sent_time;
8652 net->last_sent_time = *now;
8656 * increase the number we sent, if a
8657 * cookie is sent we don't tell them
8660 outchain = endoutchain = NULL;
8664 *num_out += ctl_cnt;
8665 /* recalc a clean slate and setup */
8666 switch (net->ro._l_addr.sa.sa_family) {
8669 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8674 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8677 #if defined(__Userspace__)
8679 mtu = net->mtu - sizeof(struct sctphdr);
8692 /************************/
8693 /* Control transmission */
8694 /************************/
8695 /* Now first lets go through the control queue */
8696 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8697 if ((sack_goes_to) &&
8698 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8699 (chk->whoTo != sack_goes_to)) {
8701 * if we have a sack in queue, and we are looking at an
8702 * ecn echo that is NOT queued to where the sack is going..
8704 if (chk->whoTo == net) {
8705 /* Don't transmit it to where its going (current net) */
8707 } else if (sack_goes_to == net) {
8708 /* But do transmit it to this address */
8709 goto skip_net_check;
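/*
 * Added note: sack_goes_to was recorded earlier, where the queued
 * SACK/NR-SACK was found; ECN-ECHO chunks are deliberately steered to
 * that same destination so that the ECNE and the SACK can ride in the
 * same packet, instead of being sent to the net currently being served.
 */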
8712 if (chk->whoTo == NULL) {
8713 if (asoc->alternate == NULL) {
8714 if (asoc->primary_destination != net) {
8718 if (asoc->alternate != net) {
8723 if (chk->whoTo != net) {
8728 if (chk->data == NULL) {
8731 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8733 * It must be unsent. Cookies and ASCONF's
8734 * hang around but their timers will force
8735 * when marked for resend.
8740 * if no AUTH is yet included and this chunk
8741 * requires it, make sure to account for it. We
8742 * don't apply the size until the AUTH chunk is
8743 * actually added below in case there is no room for
8744 * this chunk. NOTE: we overload the use of "omtu"
8747 if ((auth == NULL) &&
8748 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8749 stcb->asoc.peer_auth_chunks)) {
8750 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8753 /* Here we do NOT factor the r_mtu */
8754 if ((chk->send_size <= (int)(mtu - omtu)) ||
8755 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8757 * We probably should glom the mbuf chain
8758 * from the chk->data for control but the
8759 * problem is it becomes yet one more level
8760 * of tracking to do if for some reason
8761 * output fails. Then I have got to
8762 * reconstruct the merged control chain.. el
8763 * yucko.. for now we take the easy way and
8767 * Add an AUTH chunk, if chunk requires it
8768 * save the offset into the chain for AUTH
8770 if ((auth == NULL) &&
8771 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8772 stcb->asoc.peer_auth_chunks))) {
8773 outchain = sctp_add_auth_chunk(outchain,
8778 chk->rec.chunk_id.id);
8779 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8781 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8782 (int)chk->rec.chunk_id.can_take_data,
8783 chk->send_size, chk->copy_by_ref);
8784 if (outchain == NULL) {
8786 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8789 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8790 /* update our MTU size */
8791 if (mtu > (chk->send_size + omtu))
8792 mtu -= (chk->send_size + omtu);
8795 to_out += (chk->send_size + omtu);
8796 /* Do clear IP_DF ? */
8797 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8800 if (chk->rec.chunk_id.can_take_data)
8802 /* Mark things to be removed, if needed */
8803 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8804 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8805 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8806 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8807 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8808 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8809 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8810 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8811 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8812 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8813 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8814 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8817 /* remove these chunks at the end */
8818 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8819 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8820 /* turn off the timer */
8821 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8822 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8823 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1);
8829 * Other chunks, since they have
8830 * timers running (i.e. COOKIE)
8831 * we just "trust" that they
8832 * get sent or retransmitted.
8835 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8838 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8840 * Increment ecne send count here
8841 * this means we may be over-zealous in
8842 * our counting if the send fails, but it's
8843 * the best place to do it (we used to do
8844 * it in the queue of the chunk, but that did
8845 * not tell how many times it was sent.
8847 SCTP_STAT_INCR(sctps_sendecne);
8849 chk->sent = SCTP_DATAGRAM_SENT;
8850 if (chk->whoTo == NULL) {
8852 atomic_add_int(&net->ref_count, 1);
8858 * Ok we are out of room but we can
8859 * output without affecting the
8860 * flight size since this little guy
8861 * is a control only packet.
8864 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8866 * do NOT clear the asconf
8867 * flag as it is used to do
8868 * appropriate source address
8873 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8876 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8877 (struct sockaddr *)&net->ro._l_addr,
8880 stcb->asoc.authinfo.active_keyid,
8881 no_fragmentflg, 0, asconf,
8882 inp->sctp_lport, stcb->rport,
8883 htonl(stcb->asoc.peer_vtag),
8885 #if defined(__FreeBSD__)
8889 if (error == ENOBUFS) {
8890 asoc->ifp_had_enobuf = 1;
8891 SCTP_STAT_INCR(sctps_lowlevelerr);
8893 if (from_where == 0) {
8894 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8896 /* error, could not output */
8898 if (*now_filled == 0) {
8899 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8901 *now = net->last_sent_time;
8903 net->last_sent_time = *now;
8907 if (error == EHOSTUNREACH) {
8913 sctp_move_chunks_from_net(stcb, net);
8918 asoc->ifp_had_enobuf = 0;
8919 /* Only HB or ASCONF advances time */
8921 if (*now_filled == 0) {
8922 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8924 *now = net->last_sent_time;
8926 net->last_sent_time = *now;
8931 * increase the number we sent, if a
8932 * cookie is sent we don't tell them
8935 outchain = endoutchain = NULL;
8939 *num_out += ctl_cnt;
8940 /* recalc a clean slate and setup */
8941 switch (net->ro._l_addr.sa.sa_family) {
8944 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8949 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8952 #if defined(__Userspace__)
8954 mtu = net->mtu - sizeof(struct sctphdr);
8967 /* JRI: if dest is in PF state, do not send data to it */
8968 if ((asoc->sctp_cmt_on_off > 0) &&
8969 (net != stcb->asoc.alternate) &&
8970 (net->dest_state & SCTP_ADDR_PF)) {
8973 if (net->flight_size >= net->cwnd) {
8976 if ((asoc->sctp_cmt_on_off > 0) &&
8977 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8978 (net->flight_size > max_rwnd_per_dest)) {
8982 * We need a specific accounting for the usage of the
8983 * send buffer. We also need to check the number of messages
8984 * per net. For now, this is better than nothing and it
8985 * is disabled by default...
8987 if ((asoc->sctp_cmt_on_off > 0) &&
8988 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8989 (max_send_per_dest > 0) &&
8990 (net->flight_size > max_send_per_dest)) {
8993 /*********************/
8994 /* Data transmission */
8995 /*********************/
8997 * if AUTH for DATA is required and no AUTH has been added
8998 * yet, account for this in the mtu now... if no data can be
8999 * bundled, this adjustment won't matter anyways since the
9000 * packet will be going out...
9002 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9003 stcb->asoc.peer_auth_chunks);
9004 if (data_auth_reqd && (auth == NULL)) {
9005 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9007 /* now lets add any data within the MTU constraints */
9008 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9011 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
9012 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
9019 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
9020 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
9025 #if defined(__Userspace__)
9027 if (net->mtu > sizeof(struct sctphdr)) {
9028 omtu = net->mtu - sizeof(struct sctphdr);
9039 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
9040 (skip_data_for_this_net == 0)) ||
9042 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9043 if (no_data_chunks) {
9044 /* let only control go out */
9048 if (net->flight_size >= net->cwnd) {
9049 /* skip this net, no room for data */
9053 if ((chk->whoTo != NULL) &&
9054 (chk->whoTo != net)) {
9055 /* Don't send the chunk on this net */
9059 if (asoc->sctp_cmt_on_off == 0) {
9060 if ((asoc->alternate) &&
9061 (asoc->alternate != net) &&
9062 (chk->whoTo == NULL)) {
9064 } else if ((net != asoc->primary_destination) &&
9065 (asoc->alternate == NULL) &&
9066 (chk->whoTo == NULL)) {
9070 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9072 * strange, we have a chunk that is
9073 * too big for its destination and
9074 * yet no fragment ok flag.
9075 * Something went wrong when the
9076 * PMTU changed...we did not mark
9077 * this chunk for some reason?? I
9078 * will fix it here by letting IP
9079 * fragment it for now and printing
9080 * a warning. This really should not
9083 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9084 chk->send_size, mtu);
9085 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9087 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9088 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
9089 struct sctp_data_chunk *dchkh;
9091 dchkh = mtod(chk->data, struct sctp_data_chunk *);
9092 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
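/*
 * Added note: with the sack_immediately sysctl on and SHUTDOWN pending,
 * the I-bit (SACK-IMMEDIATELY, RFC 7053) is set on outgoing DATA so the
 * peer acks right away and the association can finish shutting down
 * without waiting out the peer's delayed-ack timer.
 */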
9094 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9095 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9096 /* ok we will add this one */
9099 * Add an AUTH chunk, if chunk
9100 * requires it, save the offset into
9101 * the chain for AUTH
9103 if (data_auth_reqd) {
9105 outchain = sctp_add_auth_chunk(outchain,
9111 auth_keyid = chk->auth_keyid;
9113 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9114 } else if (override_ok) {
9115 /* use this data's keyid */
9116 auth_keyid = chk->auth_keyid;
9118 } else if (auth_keyid != chk->auth_keyid) {
9119 /* different keyid, so done bundling */
9123 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9124 chk->send_size, chk->copy_by_ref);
9125 if (outchain == NULL) {
9126 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9127 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9128 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9131 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9134 /* update our MTU size */
9135 /* Do clear IP_DF ? */
9136 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9139 /* unsigned subtraction of mtu */
9140 if (mtu > chk->send_size)
9141 mtu -= chk->send_size;
9144 /* unsigned subtraction of r_mtu */
9145 if (r_mtu > chk->send_size)
9146 r_mtu -= chk->send_size;
9150 to_out += chk->send_size;
9151 if ((to_out > mx_mtu) && no_fragmentflg) {
9153 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9155 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9159 chk->window_probe = 0;
9160 data_list[bundle_at++] = chk;
9161 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9164 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9165 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9166 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9168 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9170 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9171 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
9172 /* Count the number of user messages that were fragmented;
9173 * we do this by counting when we see a LAST fragment
9176 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9178 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9179 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9180 data_list[0]->window_probe = 1;
9181 net->window_probe = 1;
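/*
 * Added note: when the peer's rwnd is closed but nothing is in flight,
 * exactly one chunk is still emitted and flagged as a window probe (both
 * on the chunk and on the net), matching the "we are allowed one chunk
 * in flight" rule above; this keeps a lost window update from
 * deadlocking the association.
 */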
9187 * Must be sent in order of the
9188 * TSN's (on a network)
9192 } /* for (chunk gather loop for this net) */
9193 } /* if asoc.state OPEN */
9195 /* Is there something to send for this destination? */
9197 /* We may need to start a control timer or two */
9199 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9202 * do NOT clear the asconf flag as it is used
9203 * to do appropriate source address selection.
9207 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9210 /* must start a send timer if data is being sent */
9211 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9213 * no timer running on this destination
9216 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9218 /* Now send it, if there is anything to send :> */
9219 if ((error = sctp_lowlevel_chunk_output(inp,
9222 (struct sockaddr *)&net->ro._l_addr,
9230 inp->sctp_lport, stcb->rport,
9231 htonl(stcb->asoc.peer_vtag),
9233 #if defined(__FreeBSD__)
9237 /* error, we could not output */
9238 if (error == ENOBUFS) {
9239 SCTP_STAT_INCR(sctps_lowlevelerr);
9240 asoc->ifp_had_enobuf = 1;
9242 if (from_where == 0) {
9243 SCTP_STAT_INCR(sctps_lowlevelerrusr);
9245 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9247 if (*now_filled == 0) {
9248 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
9250 *now = net->last_sent_time;
9252 net->last_sent_time = *now;
9256 if (error == EHOSTUNREACH) {
9258 * Destination went unreachable
9261 sctp_move_chunks_from_net(stcb, net);
9265 * I add this line to be paranoid. As far as
9266 * I can tell the continue, takes us back to
9267 * the top of the for, but just to make sure
9268 * I will reset these again here.
9270 ctl_cnt = bundle_at = 0;
9271 continue; /* This takes us back to the for() for the nets. */
9273 asoc->ifp_had_enobuf = 0;
9278 if (bundle_at || hbflag) {
9279 /* For data/asconf and hb set time */
9280 if (*now_filled == 0) {
9281 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
9283 *now = net->last_sent_time;
9285 net->last_sent_time = *now;
9289 *num_out += (ctl_cnt + bundle_at);
9292 /* setup for a RTO measurement */
9293 tsns_sent = data_list[0]->rec.data.TSN_seq;
9294 /* fill time if not already filled */
9295 if (*now_filled == 0) {
9296 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9298 *now = asoc->time_last_sent;
9300 asoc->time_last_sent = *now;
9302 if (net->rto_needed) {
9303 data_list[0]->do_rtt = 1;
9304 net->rto_needed = 0;
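/*
 * Added note: only the first chunk of this burst is timed (do_rtt), and
 * only when the destination has asked for a fresh sample (rto_needed);
 * retransmitted chunks are never used for RTT measurement, as noted in
 * the retransmission path below.
 */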
9306 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9307 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9314 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9317 if (old_start_at == NULL) {
9318 old_start_at = start_at;
9319 start_at = TAILQ_FIRST(&asoc->nets);
9321 goto again_one_more_time;
9325 * At the end there should be no NON timed chunks hanging on this queue.
9328 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9329 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9331 if ((*num_out == 0) && (*reason_code == 0)) {
9336 sctp_clean_up_ctl(stcb, asoc, so_locked);
9341 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9344 * Prepend an OPERATION_ERROR chunk header and put it on the end of
9345 * the control chunk queue.
9347 struct sctp_chunkhdr *hdr;
9348 struct sctp_tmit_chunk *chk;
9351 SCTP_TCB_LOCK_ASSERT(stcb);
9352 sctp_alloc_a_chunk(stcb, chk);
9355 sctp_m_freem(op_err);
9358 chk->copy_by_ref = 0;
9359 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9360 if (op_err == NULL) {
9361 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
9366 while (mat != NULL) {
9367 chk->send_size += SCTP_BUF_LEN(mat);
9368 mat = SCTP_BUF_NEXT(mat);
9370 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9371 chk->rec.chunk_id.can_take_data = 1;
9372 chk->sent = SCTP_DATAGRAM_UNSENT;
9375 chk->asoc = &stcb->asoc;
9378 hdr = mtod(op_err, struct sctp_chunkhdr *);
9379 hdr->chunk_type = SCTP_OPERATION_ERROR;
9380 hdr->chunk_flags = 0;
9381 hdr->chunk_length = htons(chk->send_size);
9382 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
9385 chk->asoc->ctrl_queue_cnt++;
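/*
 * Added usage sketch (hypothetical caller, for illustration only): a
 * typical way to feed this function is to build the cause mbuf with
 * sctp_generate_cause() and hand it over, e.g.
 *
 *	struct mbuf *op_err;
 *
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
 *	                             "example violation");
 *	if (op_err != NULL) {
 *		sctp_queue_op_err(stcb, op_err);
 *	}
 *
 * sctp_queue_op_err() then prepends the chunk header and leaves the
 * chunk on the tail of the control_send_queue for the next output pass.
 */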
9389 sctp_send_cookie_echo(struct mbuf *m,
9391 struct sctp_tcb *stcb,
9392 struct sctp_nets *net)
9395 * pull out the cookie and put it at the front of the control chunk queue
9399 struct mbuf *cookie;
9400 struct sctp_paramhdr parm, *phdr;
9401 struct sctp_chunkhdr *hdr;
9402 struct sctp_tmit_chunk *chk;
9403 uint16_t ptype, plen;
9405 /* First find the cookie in the param area */
9407 at = offset + sizeof(struct sctp_init_chunk);
9409 SCTP_TCB_LOCK_ASSERT(stcb);
9411 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
9415 ptype = ntohs(phdr->param_type);
9416 plen = ntohs(phdr->param_length);
9417 if (ptype == SCTP_STATE_COOKIE) {
9420 /* found the cookie */
9421 if ((pad = (plen % 4))) {
9424 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9425 if (cookie == NULL) {
9429 #ifdef SCTP_MBUF_LOGGING
9430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9433 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
9434 if (SCTP_BUF_IS_EXTENDED(mat)) {
9435 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9442 at += SCTP_SIZE32(plen);
9444 if (cookie == NULL) {
9445 /* Did not find the cookie */
9448 /* ok, we got the cookie lets change it into a cookie echo chunk */
9450 /* first the change from param to cookie */
9451 hdr = mtod(cookie, struct sctp_chunkhdr *);
9452 hdr->chunk_type = SCTP_COOKIE_ECHO;
9453 hdr->chunk_flags = 0;
9454 /* get the chunk stuff now and place it in the FRONT of the queue */
9455 sctp_alloc_a_chunk(stcb, chk);
9458 sctp_m_freem(cookie);
9461 chk->copy_by_ref = 0;
9462 chk->send_size = plen;
9463 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9464 chk->rec.chunk_id.can_take_data = 0;
9465 chk->sent = SCTP_DATAGRAM_UNSENT;
9467 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9468 chk->asoc = &stcb->asoc;
9471 atomic_add_int(&chk->whoTo->ref_count, 1);
9472 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9473 chk->asoc->ctrl_queue_cnt++;
9478 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9482 struct sctp_nets *net)
9485 * take a HB request and make it into a HB ack and send it.
9487 struct mbuf *outchain;
9488 struct sctp_chunkhdr *chdr;
9489 struct sctp_tmit_chunk *chk;
9493 /* must have a net pointer */
9496 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9497 if (outchain == NULL) {
9498 /* gak out of memory */
9501 #ifdef SCTP_MBUF_LOGGING
9502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9505 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
9506 if (SCTP_BUF_IS_EXTENDED(mat)) {
9507 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9512 chdr = mtod(outchain, struct sctp_chunkhdr *);
9513 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9514 chdr->chunk_flags = 0;
9515 if (chk_length % 4) {
9517 uint32_t cpthis = 0;
9520 padlen = 4 - (chk_length % 4);
9521 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
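/*
 * Added note: the HEARTBEAT request payload is echoed back verbatim;
 * only the chunk type is rewritten to HEARTBEAT-ACK and, if the chunk
 * length is not a multiple of 4, zero padding is appended (e.g. a
 * 46-byte chunk gets padlen = 4 - (46 % 4) = 2 bytes of zeros).
 */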
9523 sctp_alloc_a_chunk(stcb, chk);
9526 sctp_m_freem(outchain);
9529 chk->copy_by_ref = 0;
9530 chk->send_size = chk_length;
9531 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9532 chk->rec.chunk_id.can_take_data = 1;
9533 chk->sent = SCTP_DATAGRAM_UNSENT;
9536 chk->asoc = &stcb->asoc;
9537 chk->data = outchain;
9539 atomic_add_int(&chk->whoTo->ref_count, 1);
9540 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9541 chk->asoc->ctrl_queue_cnt++;
9545 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9547 /* formulate and queue a cookie-ack back to sender */
9548 struct mbuf *cookie_ack;
9549 struct sctp_chunkhdr *hdr;
9550 struct sctp_tmit_chunk *chk;
9552 SCTP_TCB_LOCK_ASSERT(stcb);
9554 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9555 if (cookie_ack == NULL) {
9559 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9560 sctp_alloc_a_chunk(stcb, chk);
9563 sctp_m_freem(cookie_ack);
9566 chk->copy_by_ref = 0;
9567 chk->send_size = sizeof(struct sctp_chunkhdr);
9568 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9569 chk->rec.chunk_id.can_take_data = 1;
9570 chk->sent = SCTP_DATAGRAM_UNSENT;
9573 chk->asoc = &stcb->asoc;
9574 chk->data = cookie_ack;
9575 if (chk->asoc->last_control_chunk_from != NULL) {
9576 chk->whoTo = chk->asoc->last_control_chunk_from;
9577 atomic_add_int(&chk->whoTo->ref_count, 1);
9581 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9582 hdr->chunk_type = SCTP_COOKIE_ACK;
9583 hdr->chunk_flags = 0;
9584 hdr->chunk_length = htons(chk->send_size);
9585 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9586 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9587 chk->asoc->ctrl_queue_cnt++;
9593 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9595 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9596 struct mbuf *m_shutdown_ack;
9597 struct sctp_shutdown_ack_chunk *ack_cp;
9598 struct sctp_tmit_chunk *chk;
9600 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9601 if (m_shutdown_ack == NULL) {
9605 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9606 sctp_alloc_a_chunk(stcb, chk);
9609 sctp_m_freem(m_shutdown_ack);
9612 chk->copy_by_ref = 0;
9613 chk->send_size = sizeof(struct sctp_chunkhdr);
9614 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9615 chk->rec.chunk_id.can_take_data = 1;
9616 chk->sent = SCTP_DATAGRAM_UNSENT;
9619 chk->asoc = &stcb->asoc;
9620 chk->data = m_shutdown_ack;
9623 atomic_add_int(&chk->whoTo->ref_count, 1);
9625 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9626 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9627 ack_cp->ch.chunk_flags = 0;
9628 ack_cp->ch.chunk_length = htons(chk->send_size);
9629 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9630 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9631 chk->asoc->ctrl_queue_cnt++;
9636 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9638 /* formulate and queue a SHUTDOWN to the sender */
9639 struct mbuf *m_shutdown;
9640 struct sctp_shutdown_chunk *shutdown_cp;
9641 struct sctp_tmit_chunk *chk;
9643 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9644 if (m_shutdown == NULL) {
9648 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9649 sctp_alloc_a_chunk(stcb, chk);
9652 sctp_m_freem(m_shutdown);
9655 chk->copy_by_ref = 0;
9656 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9657 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9658 chk->rec.chunk_id.can_take_data = 1;
9659 chk->sent = SCTP_DATAGRAM_UNSENT;
9662 chk->asoc = &stcb->asoc;
9663 chk->data = m_shutdown;
9666 atomic_add_int(&chk->whoTo->ref_count, 1);
9668 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9669 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9670 shutdown_cp->ch.chunk_flags = 0;
9671 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9672 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9673 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9674 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9675 chk->asoc->ctrl_queue_cnt++;
9680 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9683 * formulate and queue an ASCONF to the peer.
9684 * ASCONF parameters should be queued on the assoc queue.
9686 struct sctp_tmit_chunk *chk;
9687 struct mbuf *m_asconf;
9690 SCTP_TCB_LOCK_ASSERT(stcb);
9692 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9693 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9694 /* can't send a new one if there is one in flight already */
9698 /* compose an ASCONF chunk, maximum length is PMTU */
9699 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9700 if (m_asconf == NULL) {
9704 sctp_alloc_a_chunk(stcb, chk);
9707 sctp_m_freem(m_asconf);
9711 chk->copy_by_ref = 0;
9712 chk->data = m_asconf;
9713 chk->send_size = len;
9714 chk->rec.chunk_id.id = SCTP_ASCONF;
9715 chk->rec.chunk_id.can_take_data = 0;
9716 chk->sent = SCTP_DATAGRAM_UNSENT;
9718 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9719 chk->asoc = &stcb->asoc;
9722 atomic_add_int(&chk->whoTo->ref_count, 1);
9724 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9725 chk->asoc->ctrl_queue_cnt++;
9730 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9733 * formulate and queue an asconf-ack back to the sender.
9734 * the asconf-ack must be stored in the tcb.
9736 struct sctp_tmit_chunk *chk;
9737 struct sctp_asconf_ack *ack, *latest_ack;
9739 struct sctp_nets *net = NULL;
9741 SCTP_TCB_LOCK_ASSERT(stcb);
9742 /* Get the latest ASCONF-ACK */
9743 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9744 if (latest_ack == NULL) {
9747 if (latest_ack->last_sent_to != NULL &&
9748 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9749 /* we're doing a retransmission */
9750 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9753 if (stcb->asoc.last_control_chunk_from == NULL) {
9754 if (stcb->asoc.alternate) {
9755 net = stcb->asoc.alternate;
9757 net = stcb->asoc.primary_destination;
9760 net = stcb->asoc.last_control_chunk_from;
9765 if (stcb->asoc.last_control_chunk_from == NULL) {
9766 if (stcb->asoc.alternate) {
9767 net = stcb->asoc.alternate;
9769 net = stcb->asoc.primary_destination;
9772 net = stcb->asoc.last_control_chunk_from;
9775 latest_ack->last_sent_to = net;
9777 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9778 if (ack->data == NULL) {
9782 /* copy the asconf_ack */
9783 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9784 if (m_ack == NULL) {
9785 /* couldn't copy it */
9788 #ifdef SCTP_MBUF_LOGGING
9789 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9792 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9793 if (SCTP_BUF_IS_EXTENDED(mat)) {
9794 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9800 sctp_alloc_a_chunk(stcb, chk);
9804 sctp_m_freem(m_ack);
9807 chk->copy_by_ref = 0;
9811 atomic_add_int(&chk->whoTo->ref_count, 1);
9816 chk->send_size = ack->len;
9817 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9818 chk->rec.chunk_id.can_take_data = 1;
9819 chk->sent = SCTP_DATAGRAM_UNSENT;
9821 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9822 chk->asoc = &stcb->asoc;
9824 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9825 chk->asoc->ctrl_queue_cnt++;
9832 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9833 struct sctp_tcb *stcb,
9834 struct sctp_association *asoc,
9835 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9836 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9842 * send out one MTU of retransmission. If fast_retransmit is
9843 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9844 * rwnd. For a Cookie or Asconf in the control chunk queue we
9845 * retransmit them by themselves.
9847 * For data chunks we will pick out the lowest TSN's in the sent_queue
9848 * marked for resend and bundle them all together (up to an MTU of
9849 * the destination). The address to send to should have been
9850 * selected/changed where the retransmission was marked (i.e. in FR
9851 * or t3-timeout routines).
9853 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9854 struct sctp_tmit_chunk *chk, *fwd;
9855 struct mbuf *m, *endofchain;
9856 struct sctp_nets *net = NULL;
9857 uint32_t tsns_sent = 0;
9858 int no_fragmentflg, bundle_at, cnt_thru;
9860 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9861 struct sctp_auth_chunk *auth = NULL;
9862 uint32_t auth_offset = 0;
9863 uint16_t auth_keyid;
9864 int override_ok = 1;
9865 int data_auth_reqd = 0;
9868 #if defined(__APPLE__)
9870 sctp_lock_assert(SCTP_INP_SO(inp));
9872 sctp_unlock_assert(SCTP_INP_SO(inp));
9875 SCTP_TCB_LOCK_ASSERT(stcb);
9876 tmr_started = ctl_cnt = bundle_at = error = 0;
9881 endofchain = m = NULL;
9882 auth_keyid = stcb->asoc.authinfo.active_keyid;
9883 #ifdef SCTP_AUDITING_ENABLED
9884 sctp_audit_log(0xC3, 1);
9886 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9887 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9888 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
9889 asoc->sent_queue_retran_cnt);
9890 asoc->sent_queue_cnt = 0;
9891 asoc->sent_queue_cnt_removeable = 0;
9892 /* send back 0/0 so we enter normal transmission */
9896 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9897 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9898 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9899 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9900 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9903 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9904 if (chk != asoc->str_reset) {
9906 * not eligible for retran if it's
9913 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9917 * Add an AUTH chunk, if chunk requires it save the
9918 * offset into the chain for AUTH
9920 if ((auth == NULL) &&
9921 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9922 stcb->asoc.peer_auth_chunks))) {
9923 m = sctp_add_auth_chunk(m, &endofchain,
9924 &auth, &auth_offset,
9926 chk->rec.chunk_id.id);
9927 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9929 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9935 /* do we have control chunks to retransmit? */
9937 /* Start a timer no matter if we succeed or fail */
9938 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9939 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9940 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9941 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9942 chk->snd_count++; /* update our count */
9943 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9944 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9945 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9946 no_fragmentflg, 0, 0,
9947 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9948 chk->whoTo->port, NULL,
9949 #if defined(__FreeBSD__)
9953 SCTP_STAT_INCR(sctps_lowlevelerr);
9960 * We don't want to mark the net->sent time here, since
9961 * we use this for HB and retrans cannot measure RTT
9963 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9965 chk->sent = SCTP_DATAGRAM_SENT;
9966 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9970 /* Clean up the fwd-tsn list */
9971 sctp_clean_up_ctl(stcb, asoc, so_locked);
9976 * Ok, it is just data retransmission we need to do or that and a
9977 * fwd-tsn with it all.
9979 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9980 return (SCTP_RETRAN_DONE);
9982 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9983 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9984 /* not yet open, resend the cookie and that is it */
9987 #ifdef SCTP_AUDITING_ENABLED
9988 sctp_auditing(20, inp, stcb, NULL);
9990 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9991 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9992 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9993 /* No, not sent to this net or not ready for rtx */
9996 if (chk->data == NULL) {
9997 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9998 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
10001 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10002 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10003 /* Gak, we have exceeded max unlucky retran, abort! */
10004 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
10006 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
10007 atomic_add_int(&stcb->asoc.refcnt, 1);
10008 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
10009 SCTP_TCB_LOCK(stcb);
10010 atomic_subtract_int(&stcb->asoc.refcnt, 1);
10011 return (SCTP_RETRAN_EXIT);
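/*
 * Added note on the abort path above: once a single chunk has been
 * retransmitted sctp_max_retran_chunk times the whole association is
 * torn down; the refcnt is bumped around sctp_abort_an_association() so
 * the stcb stays valid long enough to re-take its lock and return
 * SCTP_RETRAN_EXIT to the caller.
 */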
10013 /* pick up the net */
10015 switch (net->ro._l_addr.sa.sa_family) {
10018 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10023 mtu = net->mtu - SCTP_MIN_OVERHEAD;
10026 #if defined(__Userspace__)
10028 mtu = net->mtu - sizeof(struct sctphdr);
10037 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10038 /* No room in peers rwnd */
10041 tsn = asoc->last_acked_seq + 1;
10042 if (tsn == chk->rec.data.TSN_seq) {
10044 * we make a special exception for this
10045 * case. The peer has no rwnd but is missing
10046 * the lowest chunk.. which is probably what
10047 * is holding up the rwnd.
10049 goto one_chunk_around;
10054 if (asoc->peers_rwnd < mtu) {
10056 if ((asoc->peers_rwnd == 0) &&
10057 (asoc->total_flight == 0)) {
10058 chk->window_probe = 1;
10059 chk->whoTo->window_probe = 1;
10062 #ifdef SCTP_AUDITING_ENABLED
10063 sctp_audit_log(0xC3, 2);
10067 net->fast_retran_ip = 0;
10068 if (chk->rec.data.doing_fast_retransmit == 0) {
10070 * if no FR in progress skip destinations that have
10071 * flight_size > cwnd.
10073 if (net->flight_size >= net->cwnd) {
10078 * Mark the destination net to have FR recovery
10079 * limits put on it.
10082 net->fast_retran_ip = 1;
10086 * if no AUTH is yet included and this chunk requires it,
10087 * make sure to account for it. We don't apply the size
10088 * until the AUTH chunk is actually added below in case
10089 * there is no room for this chunk.
10091 if (data_auth_reqd && (auth == NULL)) {
10092 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10096 if ((chk->send_size <= (mtu - dmtu)) ||
10097 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10098 /* ok we will add this one */
10099 if (data_auth_reqd) {
10100 if (auth == NULL) {
10101 m = sctp_add_auth_chunk(m,
10107 auth_keyid = chk->auth_keyid;
10109 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10110 } else if (override_ok) {
10111 auth_keyid = chk->auth_keyid;
10113 } else if (chk->auth_keyid != auth_keyid) {
10114 /* different keyid, so done bundling */
10118 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10120 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10123 /* Do clear IP_DF ? */
10124 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10125 no_fragmentflg = 0;
10127 /* update our MTU size */
10128 if (mtu > (chk->send_size + dmtu))
10129 mtu -= (chk->send_size + dmtu);
10132 data_list[bundle_at++] = chk;
10133 if (one_chunk && (asoc->total_flight <= 0)) {
10134 SCTP_STAT_INCR(sctps_windowprobed);
10137 if (one_chunk == 0) {
10139 * now, are there any more forward from chk to pick
10142 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10143 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10144 /* Nope, not for retran */
10147 if (fwd->whoTo != net) {
10148 /* Nope, not the net in question */
10151 if (data_auth_reqd && (auth == NULL)) {
10152 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10155 if (fwd->send_size <= (mtu - dmtu)) {
10156 if (data_auth_reqd) {
10157 if (auth == NULL) {
10158 m = sctp_add_auth_chunk(m,
10164 auth_keyid = fwd->auth_keyid;
10166 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10167 } else if (override_ok) {
10168 auth_keyid = fwd->auth_keyid;
10170 } else if (fwd->auth_keyid != auth_keyid) {
10171 /* different keyid, so done bundling */
10175 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10177 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10180 /* Do clear IP_DF ? */
10181 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10182 no_fragmentflg = 0;
10184 /* update our MTU size */
10185 if (mtu > (fwd->send_size + dmtu))
10186 mtu -= (fwd->send_size + dmtu);
10189 data_list[bundle_at++] = fwd;
10190 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10194 /* can't fit so we are done */
10199 /* Is there something to send for this destination? */
10202 * No matter if we fail or succeed we should start a
10203 * timer. A failure is like a lost IP packet :-)
10205 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10207 * no timer running on this destination
10210 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10213 /* Now lets send it, if there is anything to send :> */
10214 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10215 (struct sockaddr *)&net->ro._l_addr, m,
10216 auth_offset, auth, auth_keyid,
10217 no_fragmentflg, 0, 0,
10218 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10220 #if defined(__FreeBSD__)
10224 /* error, we could not output */
10225 SCTP_STAT_INCR(sctps_lowlevelerr);
10233 * We don't want to mark the net->sent time here
10234 * since we use this for HB and retrans cannot
10237 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10239 /* For auto-close */
10241 if (*now_filled == 0) {
10242 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10243 *now = asoc->time_last_sent;
10246 asoc->time_last_sent = *now;
10248 *cnt_out += bundle_at;
10249 #ifdef SCTP_AUDITING_ENABLED
10250 sctp_audit_log(0xC4, bundle_at);
10253 tsns_sent = data_list[0]->rec.data.TSN_seq;
10255 for (i = 0; i < bundle_at; i++) {
10256 SCTP_STAT_INCR(sctps_sendretransdata);
10257 data_list[i]->sent = SCTP_DATAGRAM_SENT;
10259 * When we have revoked data and we
10260 * retransmit it, we clear the revoked
10261 * flag since this flag dictates whether we
10262 * subtracted from the fs (flight size)
10264 if (data_list[i]->rec.data.chunk_was_revoked) {
10265 /* Deflate the cwnd */
10266 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10267 data_list[i]->rec.data.chunk_was_revoked = 0;
10269 data_list[i]->snd_count++;
10270 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10271 /* record the time */
10272 data_list[i]->sent_rcv_time = asoc->time_last_sent;
10273 if (data_list[i]->book_size_scale) {
10275 * need to double the book size on
10278 data_list[i]->book_size_scale = 0;
10279 /* Since we double the booksize, we must
10280 * also double the output queue size, since it
10281 * gets shrunk when we free by this amount.
10283 atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10284 data_list[i]->book_size *= 2;
10288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10289 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10290 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10292 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10293 (uint32_t) (data_list[i]->send_size +
10294 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10296 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10297 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10298 data_list[i]->whoTo->flight_size,
10299 data_list[i]->book_size,
10300 (uintptr_t)data_list[i]->whoTo,
10301 data_list[i]->rec.data.TSN_seq);
10303 sctp_flight_size_increase(data_list[i]);
10304 sctp_total_flight_increase(stcb, data_list[i]);
10305 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10306 /* SWS sender side engages */
10307 asoc->peers_rwnd = 0;
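/*
 * Added note: this implements sender-side silly-window avoidance. Once
 * the peer's remaining advertised window falls below the
 * sctp_sws_sender threshold it is treated as zero, so we stop dribbling
 * tiny DATA chunks and wait for a larger window to open.
 */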
10310 (data_list[i]->rec.data.doing_fast_retransmit)) {
10311 SCTP_STAT_INCR(sctps_sendfastretrans);
10312 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10313 (tmr_started == 0)) {
10315 * ok we just fast-retrans'd
10316 * the lowest TSN, i.e. the
10317 * first on the list. In
10318 * this case we want to give
10319 * some more time to get a
10320 * SACK back without a
10323 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10324 SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4);
10325 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10329 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10330 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10332 #ifdef SCTP_AUDITING_ENABLED
10333 sctp_auditing(21, inp, stcb, NULL);
10336 /* None will fit */
10339 if (asoc->sent_queue_retran_cnt <= 0) {
10340 /* all done we have no more to retran */
10341 asoc->sent_queue_retran_cnt = 0;
10345 /* No more room in rwnd */
10348 /* stop the for loop here. we sent out a packet */
10355 sctp_timer_validation(struct sctp_inpcb *inp,
10356 struct sctp_tcb *stcb,
10357 struct sctp_association *asoc)
10359 struct sctp_nets *net;
10361 /* Validate that a timer is running somewhere */
10362 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10363 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10364 /* Here is a timer */
10368 SCTP_TCB_LOCK_ASSERT(stcb);
10369 /* Gak, we did not have a timer somewhere */
10370 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10371 if (asoc->alternate) {
10372 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10374 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10380 sctp_chunk_output (struct sctp_inpcb *inp,
10381 struct sctp_tcb *stcb,
10384 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10390 * Ok, this is the generic chunk service queue. We must do the
10392 * - See if there are retransmits pending, if so we must
10394 * - Service the stream queue that is next, moving any
10395 * message (note I must get a complete message i.e.
10396 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
10398 * - Check to see if the cwnd/rwnd allows any output, if so we
10399 * go ahead and formulate and send the low level chunks, making sure
10400 * to combine any control in the control chunk queue also.
10402 struct sctp_association *asoc;
10403 struct sctp_nets *net;
10404 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
10405 unsigned int burst_cnt = 0;
10406 struct timeval now;
10407 int now_filled = 0;
10409 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10412 unsigned int tot_frs = 0;
10414 #if defined(__APPLE__)
10416 sctp_lock_assert(SCTP_INP_SO(inp));
10418 sctp_unlock_assert(SCTP_INP_SO(inp));
10421 asoc = &stcb->asoc;
10422 /* The Nagle algorithm is only applied when handling a send call. */
10423 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10424 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10432 SCTP_TCB_LOCK_ASSERT(stcb);
10434 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10436 if ((un_sent <= 0) &&
10437 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10438 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10439 (asoc->sent_queue_retran_cnt == 0)) {
10440 /* Nothing to do unless there is something left to be sent */
10443 /* Do we have something to send, data or control AND
10444 * a sack timer running? If so, piggy-back the sack.
10446 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10447 sctp_send_sack(stcb, so_locked);
10448 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
10450 while (asoc->sent_queue_retran_cnt) {
10452 * Ok, it is retransmission time only, we send out only ONE
10453 * packet with a single call off to the retran code.
10455 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10457 * Special hook for handling cookies discarded
10458 * by the peer that carried data. Send cookie-ack only
10459 * and then the next call will get the retrans.
10461 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10463 &now, &now_filled, frag_point, so_locked);
10465 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10466 /* if it's not from an HB then do it */
10468 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10474 * it's from any other place, we don't allow retran
10475 * output (only control)
10480 /* Can't send anymore */
10482 * now lets push out control by calling med-level
10483 * output once. this assures that we WILL send HB's
10486 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10488 &now, &now_filled, frag_point, so_locked);
10489 #ifdef SCTP_AUDITING_ENABLED
10490 sctp_auditing(8, inp, stcb, NULL);
10492 sctp_timer_validation(inp, stcb, asoc);
10497 * The count was off.. retran is not happening so do
10498 * the normal retransmission.
10500 #ifdef SCTP_AUDITING_ENABLED
10501 sctp_auditing(9, inp, stcb, NULL);
10503 if (ret == SCTP_RETRAN_EXIT) {
10508 if (from_where == SCTP_OUTPUT_FROM_T3) {
10509 /* Only one transmission allowed out of a timeout */
10510 #ifdef SCTP_AUDITING_ENABLED
10511 sctp_auditing(10, inp, stcb, NULL);
10513 /* Push out any control */
10514 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10515 &now, &now_filled, frag_point, so_locked);
10518 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10519 /* Hit FR burst limit */
10522 if ((num_out == 0) && (ret == 0)) {
10523 /* No more retrans to send */
10527 #ifdef SCTP_AUDITING_ENABLED
10528 sctp_auditing(12, inp, stcb, NULL);
10530 /* Check for bad destinations, if they exist move chunks around. */
10531 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10532 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10534 * if possible move things off of this address; we
10535 * still may send below due to the dormant state, but
10536 * we try to find an alternate address to send to,
10537 * and if we have one we move all queued data on the
10538 * out wheel to this alternate address.
10540 if (net->ref_count > 1)
10541 sctp_move_chunks_from_net(stcb, net);
10544 * if ((asoc->sat_network) || (net->addr_is_local))
10545 * { burst_limit = asoc->max_burst *
10546 * SCTP_SAT_NETWORK_BURST_INCR; }
10548 if (asoc->max_burst > 0) {
10549 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10550 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10551 /* JRS - Use the congestion control given in the congestion control module */
10552 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
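/*
 * Added rough example (illustrative numbers): with cwnd-based max burst,
 * the congestion-control module is asked to rein cwnd in whenever it
 * exceeds flight_size + max_burst * mtu; e.g. with flight_size = 0,
 * max_burst = 4 and an MTU of 1500 the effective cwnd for this pass is
 * held to roughly 4 * 1500 = 6000 bytes, so one call to
 * sctp_med_chunk_output() cannot blast much more than max_burst packets
 * at this destination.
 */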
10553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10554 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10556 SCTP_STAT_INCR(sctps_maxburstqueued);
10558 net->fast_retran_ip = 0;
10560 if (net->flight_size == 0) {
10561 /* Should be decaying the cwnd here */
10571 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10572 &reason_code, 0, from_where,
10573 &now, &now_filled, frag_point, so_locked);
10575 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10576 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10577 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10579 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10580 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10581 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10585 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10587 tot_out += num_out;
10589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10590 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10591 if (num_out == 0) {
10592 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10597 * When the Nagle algorithm is used, look at how much
10598 * is unsent; then if it's smaller than an MTU and we
10599 * have data in flight we stop, except if we are
10600 * handling a fragmented user message.
10602 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10603 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
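/*
 * Added rough example (illustrative numbers): un_sent is the unsent
 * byte count plus one DATA chunk header (16 bytes) per queued message.
 * With smallest_mtu = 1500 and SCTP_MIN_OVERHEAD presumably around 52
 * bytes (IPv6-sized headers), the Nagle threshold below is about 1448
 * bytes; e.g. three queued 100-byte messages give
 * un_sent = 300 + 3 * 16 = 348 < 1448 and, with data already in flight,
 * we break out and wait for a SACK instead of sending small packets,
 * unless a fragmented user message is still being filled in.
 */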
10604 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10605 (stcb->asoc.total_flight > 0) &&
10606 ((stcb->asoc.locked_on_sending == NULL) ||
10607 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10611 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10612 TAILQ_EMPTY(&asoc->send_queue) &&
10613 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10614 /* Nothing left to send */
10617 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10618 /* Nothing left to send */
10621 } while (num_out &&
10622 ((asoc->max_burst == 0) ||
10623 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10624 (burst_cnt < asoc->max_burst)));
10626 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10627 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10628 SCTP_STAT_INCR(sctps_maxburstqueued);
10629 asoc->burst_limit_applied = 1;
10630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10631 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10634 asoc->burst_limit_applied = 0;
10637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10638 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10640 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10644 * Now we need to clean up the control chunk chain if an ECNE is on
10645 * it. It must be marked as UNSENT again so the next call will continue
10646 * to send it until such time that we get a CWR, to remove it.
10648 if (stcb->asoc.ecn_echo_cnt_onq)
10649 sctp_fix_ecn_echo(asoc);
10656 struct sctp_inpcb *inp,
10657 #if defined(__Panda__)
10662 struct sockaddr *addr,
10663 #if defined(__Panda__)
10664 pakhandle_type control,
10666 struct mbuf *control,
10668 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
10670 #elif defined(__Windows__)
10673 #if defined(__APPLE__)
10674 struct proc *p SCTP_UNUSED,
10682 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10686 if (inp->sctp_socket == NULL) {
10687 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10690 return (sctp_sosend(inp->sctp_socket,
10692 (struct uio *)NULL,
10695 #if defined(__APPLE__) || defined(__Panda__)
10704 send_forward_tsn(struct sctp_tcb *stcb,
10705 struct sctp_association *asoc)
10707 struct sctp_tmit_chunk *chk;
10708 struct sctp_forward_tsn_chunk *fwdtsn;
10709 uint32_t advance_peer_ack_point;
10711 SCTP_TCB_LOCK_ASSERT(stcb);
10712 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10713 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10714 /* mark it as unsent */
10715 chk->sent = SCTP_DATAGRAM_UNSENT;
10716 chk->snd_count = 0;
10717 /* Do we correct its output location? */
10719 sctp_free_remote_addr(chk->whoTo);
10722 goto sctp_fill_in_rest;
10725 /* Ok if we reach here we must build one */
10726 sctp_alloc_a_chunk(stcb, chk);
10730 asoc->fwd_tsn_cnt++;
10731 chk->copy_by_ref = 0;
10732 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10733 chk->rec.chunk_id.can_take_data = 0;
10736 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10737 if (chk->data == NULL) {
10738 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10741 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10742 chk->sent = SCTP_DATAGRAM_UNSENT;
10743 chk->snd_count = 0;
10744 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10745 asoc->ctrl_queue_cnt++;
10748 * Here we go through and fill out the part that deals with
10749 * stream/seq of the ones we skip.
10751 SCTP_BUF_LEN(chk->data) = 0;
10753 struct sctp_tmit_chunk *at, *tp1, *last;
10754 struct sctp_strseq *strseq;
10755 unsigned int cnt_of_space, i, ovh;
10756 unsigned int space_needed;
10757 unsigned int cnt_of_skipped = 0;
10759 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10760 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10761 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10762 /* no more to look at */
10765 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10766 /* We don't report these */
10771 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10772 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10774 cnt_of_space = M_TRAILINGSPACE(chk->data);
10776 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10777 ovh = SCTP_MIN_OVERHEAD;
10779 ovh = SCTP_MIN_V4_OVERHEAD;
10781 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10782 /* trim to a mtu size */
10783 cnt_of_space = asoc->smallest_mtu - ovh;
10785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10786 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10787 0xff, 0, cnt_of_skipped,
10788 asoc->advanced_peer_ack_point);
10791 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10792 if (cnt_of_space < space_needed) {
10794 * ok we must trim down the chunk by lowering the
10795 * advance peer ack point.
10797 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10798 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10799 0xff, 0xff, cnt_of_space,
10802 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10803 cnt_of_skipped /= sizeof(struct sctp_strseq);
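/*
 * Not everything fits: report only as many strseq entries as the mbuf
 * (already trimmed to one MTU) can carry, and lower the advance peer
 * ack point accordingly below.
 */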
10805 * Go through and find the TSN that will be the one we report.
10808 at = TAILQ_FIRST(&asoc->sent_queue);
10810 for (i = 0; i < cnt_of_skipped; i++) {
10811 tp1 = TAILQ_NEXT(at, sctp_next);
10818 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10819 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10820 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10821 asoc->advanced_peer_ack_point);
10825 * last now points to the last one I can report; update the advance peer ack point.
10829 advance_peer_ack_point = last->rec.data.TSN_seq;
10830 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10831 cnt_of_skipped * sizeof(struct sctp_strseq);
10833 chk->send_size = space_needed;
10834 /* Setup the chunk */
10835 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10836 fwdtsn->ch.chunk_length = htons(chk->send_size);
10837 fwdtsn->ch.chunk_flags = 0;
10838 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10839 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10840 SCTP_BUF_LEN(chk->data) = chk->send_size;
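/*
 * new_cumulative_tsn tells the peer it may move its cumulative ack
 * point forward to advance_peer_ack_point, skipping the abandoned
 * TSNs below it (FORWARD-TSN semantics, RFC 3758).
 */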
10843 * Move pointer to after the fwdtsn and transfer to the strseq pointer.
10846 strseq = (struct sctp_strseq *)fwdtsn;
10848 * Now populate the strseq list. This is done blindly
10849 * without pulling out duplicate stream info. This is
10850 * inefficient but won't harm the process since the peer will
10851 * look at these in sequence and will thus release anything.
10852 * It could mean we exceed the PMTU and chop off some that
10853 * we could have included.. but this is unlikely (aka 1432/4
10854 * would mean 300+ stream seq's would have to be reported in
10855 * one FWD-TSN). With a bit of work we can later fix this to
10856 * optimize and pull out duplicates.. but it does add more
10857 * overhead. So for now... not!
10859 at = TAILQ_FIRST(&asoc->sent_queue);
10860 for (i = 0; i < cnt_of_skipped; i++) {
10861 tp1 = TAILQ_NEXT(at, sctp_next);
10864 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10865 /* We don't report these */
10870 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10871 at->rec.data.fwd_tsn_cnt = 0;
10873 strseq->stream = ntohs(at->rec.data.stream_number);
10874 strseq->sequence = ntohs(at->rec.data.stream_seq);
10883 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10884 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10890 * Queue up a SACK or NR-SACK in the control queue.
10891 * We must first check to see if a SACK or NR-SACK is
10892 * somehow on the control queue.
10893 * If so, we will remove the old one.
10895 struct sctp_association *asoc;
10896 struct sctp_tmit_chunk *chk, *a_chk;
10897 struct sctp_sack_chunk *sack;
10898 struct sctp_nr_sack_chunk *nr_sack;
10899 struct sctp_gap_ack_block *gap_descriptor;
10900 struct sack_track *selector;
10905 int limit_reached = 0;
10906 unsigned int i, siz, j;
10907 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10910 uint32_t highest_tsn;
10915 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10916 (stcb->asoc.peer_supports_nr_sack == 1)) {
10917 type = SCTP_NR_SELECTIVE_ACK;
10919 type = SCTP_SELECTIVE_ACK;
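/* An NR-SACK is only built when both sides have it enabled; otherwise use a plain SACK. */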
10922 asoc = &stcb->asoc;
10923 SCTP_TCB_LOCK_ASSERT(stcb);
10924 if (asoc->last_data_chunk_from == NULL) {
10925 /* Hmm we never received anything */
10928 sctp_slide_mapping_arrays(stcb);
10929 sctp_set_rwnd(stcb, asoc);
10930 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10931 if (chk->rec.chunk_id.id == type) {
10932 /* Hmm, found a sack already on queue, remove it */
10933 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10934 asoc->ctrl_queue_cnt--;
10937 sctp_m_freem(a_chk->data);
10938 a_chk->data = NULL;
10940 if (a_chk->whoTo) {
10941 sctp_free_remote_addr(a_chk->whoTo);
10942 a_chk->whoTo = NULL;
10947 if (a_chk == NULL) {
10948 sctp_alloc_a_chunk(stcb, a_chk);
10949 if (a_chk == NULL) {
10950 /* No memory so we drop the idea, and set a timer */
10951 if (stcb->asoc.delayed_ack) {
10952 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10953 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10954 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10955 stcb->sctp_ep, stcb, NULL);
10957 stcb->asoc.send_sack = 1;
10961 a_chk->copy_by_ref = 0;
10962 a_chk->rec.chunk_id.id = type;
10963 a_chk->rec.chunk_id.can_take_data = 1;
10965 /* Clear our pkt counts */
10966 asoc->data_pkts_seen = 0;
10968 a_chk->asoc = asoc;
10969 a_chk->snd_count = 0;
10970 a_chk->send_size = 0; /* fill in later */
10971 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10972 a_chk->whoTo = NULL;
10974 if ((asoc->numduptsns) ||
10975 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10977 * Ok, we have some duplicates or the destination for the
10978 * sack is unreachable, let's see if we can select an
10979 * alternate to asoc->last_data_chunk_from
10981 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10982 (asoc->used_alt_onsack > asoc->numnets)) {
10983 /* We used an alt last time, don't this time */
10984 a_chk->whoTo = NULL;
10986 asoc->used_alt_onsack++;
10987 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10989 if (a_chk->whoTo == NULL) {
10990 /* Nope, no alternate */
10991 a_chk->whoTo = asoc->last_data_chunk_from;
10992 asoc->used_alt_onsack = 0;
10996 * No duplicates, so we use the last place we received data from.
10999 asoc->used_alt_onsack = 0;
11000 a_chk->whoTo = asoc->last_data_chunk_from;
11002 if (a_chk->whoTo) {
11003 atomic_add_int(&a_chk->whoTo->ref_count, 1);
11005 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11006 highest_tsn = asoc->highest_tsn_inside_map;
11008 highest_tsn = asoc->highest_tsn_inside_nr_map;
11010 if (highest_tsn == asoc->cumulative_tsn) {
11012 if (type == SCTP_SELECTIVE_ACK) {
11013 space_req = sizeof(struct sctp_sack_chunk);
11015 space_req = sizeof(struct sctp_nr_sack_chunk);
11018 /* gaps get a cluster */
11019 space_req = MCLBYTES;
11021 /* Ok now let's formulate an mbuf with our sack */
11022 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11023 if ((a_chk->data == NULL) ||
11024 (a_chk->whoTo == NULL)) {
11025 /* rats, no mbuf memory */
11027 /* was a problem with the destination */
11028 sctp_m_freem(a_chk->data);
11029 a_chk->data = NULL;
11031 sctp_free_a_chunk(stcb, a_chk, so_locked);
11032 /* sa_ignore NO_NULL_CHK */
11033 if (stcb->asoc.delayed_ack) {
11034 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11035 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
11036 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11037 stcb->sctp_ep, stcb, NULL);
11039 stcb->asoc.send_sack = 1;
11043 /* ok, let's go through and fill it in */
11044 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11045 space = M_TRAILINGSPACE(a_chk->data);
11046 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11047 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11049 limit = mtod(a_chk->data, caddr_t);
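/*
 * 'limit' marks the end of the usable space in this mbuf (capped to one
 * MTU above); gap blocks and duplicate TSNs are appended only while
 * they still fit below it.
 */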
11054 if ((asoc->sctp_cmt_on_off > 0) &&
11055 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
11057 * CMT DAC algorithm: if 2 (i.e., binary 10) packets have been
11058 * received, then set the high bit to 1, else 0. Reset the count once reported.
11061 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11062 asoc->cmt_dac_pkts_rcvd = 0;
11064 #ifdef SCTP_ASOCLOG_OF_TSNS
11065 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11066 stcb->asoc.cumack_log_atsnt++;
11067 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11068 stcb->asoc.cumack_log_atsnt = 0;
11071 /* reset the reader's interpretation */
11072 stcb->freed_by_sorcv_sincelast = 0;
11074 if (type == SCTP_SELECTIVE_ACK) {
11075 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11077 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11078 if (highest_tsn > asoc->mapping_array_base_tsn) {
11079 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11081 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
11085 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11086 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11087 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11088 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11090 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11094 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11097 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
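/*
 * 'offset' converts bit positions in the mapping array into distances
 * from the cumulative TSN ack, which is the unit the gap block
 * start/end fields are expressed in.
 */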
11099 if (((type == SCTP_SELECTIVE_ACK) &&
11100 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11101 ((type == SCTP_NR_SELECTIVE_ACK) &&
11102 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11103 /* we have a gap .. maybe */
11104 for (i = 0; i < siz; i++) {
11105 tsn_map = asoc->mapping_array[i];
11106 if (type == SCTP_SELECTIVE_ACK) {
11107 tsn_map |= asoc->nr_mapping_array[i];
11111 * Clear all bits corresponding to TSNs
11112 * smaller than or equal to the cumulative TSN.
11114 tsn_map &= (~0 << (1 - offset));
11116 selector = &sack_array[tsn_map];
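/*
 * Look up the precomputed gap-block pattern for this byte of the
 * mapping array; the selector also carries edge flags that say whether
 * its blocks may merge with those of adjacent bytes.
 */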
11117 if (mergeable && selector->right_edge) {
11119 * Backup, left and right edges were ok to merge.
11125 if (selector->num_entries == 0)
11128 for (j = 0; j < selector->num_entries; j++) {
11129 if (mergeable && selector->right_edge) {
11131 * do a merge by NOT setting the start (left edge)
11137 * no merge, set the left edge (start)
11141 gap_descriptor->start = htons((selector->gaps[j].start + offset));
11143 gap_descriptor->end = htons((selector->gaps[j].end + offset));
11146 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11152 if (selector->left_edge) {
11156 if (limit_reached) {
11157 /* Reached the limit, stop */
11163 if ((type == SCTP_NR_SELECTIVE_ACK) &&
11164 (limit_reached == 0)) {
11168 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11169 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11171 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11174 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11177 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11179 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11180 /* we have a gap .. maybe */
11181 for (i = 0; i < siz; i++) {
11182 tsn_map = asoc->nr_mapping_array[i];
11185 * Clear all bits corresponding to TSNs
11186 * smaller than or equal to the cumulative TSN.
11188 tsn_map &= (~0 << (1 - offset));
11190 selector = &sack_array[tsn_map];
11191 if (mergeable && selector->right_edge) {
11193 * Backup, left and right edges were ok to merge.
11196 num_nr_gap_blocks--;
11199 if (selector->num_entries == 0)
11202 for (j = 0; j < selector->num_entries; j++) {
11203 if (mergeable && selector->right_edge) {
11205 * do a merge by NOT setting the start (left edge)
11211 * no merge, set the left edge (start)
11215 gap_descriptor->start = htons((selector->gaps[j].start + offset));
11217 gap_descriptor->end = htons((selector->gaps[j].end + offset));
11218 num_nr_gap_blocks++;
11220 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11226 if (selector->left_edge) {
11230 if (limit_reached) {
11231 /* Reached the limit, stop */
11238 /* now we must add any dups we are going to report. */
11239 if ((limit_reached == 0) && (asoc->numduptsns)) {
11240 dup = (uint32_t *) gap_descriptor;
11241 for (i = 0; i < asoc->numduptsns; i++) {
11242 *dup = htonl(asoc->dup_tsns[i]);
11245 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11250 asoc->numduptsns = 0;
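/* Duplicates are reported only once; the list is cleared now that it has been copied into this SACK. */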
11253 * now that the chunk is prepared, queue it to the control chunk queue.
11256 if (type == SCTP_SELECTIVE_ACK) {
11257 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
11258 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11259 num_dups * sizeof(int32_t);
11260 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11261 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11262 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11263 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11264 sack->sack.num_dup_tsns = htons(num_dups);
11265 sack->ch.chunk_type = type;
11266 sack->ch.chunk_flags = flags;
11267 sack->ch.chunk_length = htons(a_chk->send_size);
11269 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
11270 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11271 num_dups * sizeof(int32_t);
11272 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11273 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11274 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11275 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11276 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11277 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11278 nr_sack->nr_sack.reserved = 0;
11279 nr_sack->ch.chunk_type = type;
11280 nr_sack->ch.chunk_flags = flags;
11281 nr_sack->ch.chunk_length = htons(a_chk->send_size);
11283 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11284 asoc->my_last_reported_rwnd = asoc->my_rwnd;
11285 asoc->ctrl_queue_cnt++;
11286 asoc->send_sack = 0;
11287 SCTP_STAT_INCR(sctps_sendsacks);
11292 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
11293 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11298 struct mbuf *m_abort, *m, *m_last;
11299 struct mbuf *m_out, *m_end = NULL;
11300 struct sctp_abort_chunk *abort;
11301 struct sctp_auth_chunk *auth = NULL;
11302 struct sctp_nets *net;
11304 uint32_t auth_offset = 0;
11305 uint16_t cause_len, chunk_len, padding_len;
11307 #if defined(__APPLE__)
11309 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11311 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11314 SCTP_TCB_LOCK_ASSERT(stcb);
11316 * Add an AUTH chunk, if this chunk requires it, and save the offset into
11317 * the chain for AUTH
11319 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11320 stcb->asoc.peer_auth_chunks)) {
11321 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11322 stcb, SCTP_ABORT_ASSOCIATION);
11323 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11327 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11328 if (m_abort == NULL) {
11330 sctp_m_freem(m_out);
11333 sctp_m_freem(operr);
11337 /* link in any error */
11338 SCTP_BUF_NEXT(m_abort) = operr;
11341 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11342 cause_len += (uint16_t)SCTP_BUF_LEN(m);
11343 if (SCTP_BUF_NEXT(m) == NULL) {
11347 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11348 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
11349 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
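/*
 * chunk_len excludes padding; any pad bytes are appended to the last
 * mbuf further down so the ABORT chunk ends on a 4-byte boundary.
 */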
11350 if (m_out == NULL) {
11351 /* NO Auth chunk prepended, so reserve space in front */
11352 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11355 /* Put AUTH chunk at the front of the chain */
11356 SCTP_BUF_NEXT(m_end) = m_abort;
11358 if (stcb->asoc.alternate) {
11359 net = stcb->asoc.alternate;
11361 net = stcb->asoc.primary_destination;
11363 /* Fill in the ABORT chunk header. */
11364 abort = mtod(m_abort, struct sctp_abort_chunk *);
11365 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11366 if (stcb->asoc.peer_vtag == 0) {
11367 /* This happens iff the assoc is in COOKIE-WAIT state. */
11368 vtag = stcb->asoc.my_vtag;
11369 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11371 vtag = stcb->asoc.peer_vtag;
11372 abort->ch.chunk_flags = 0;
11374 abort->ch.chunk_length = htons(chunk_len);
11375 /* Add padding, if necessary. */
11376 if (padding_len > 0) {
11377 if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
11378 sctp_m_freem(m_out);
11382 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11383 (struct sockaddr *)&net->ro._l_addr,
11384 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11385 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11386 stcb->asoc.primary_destination->port, NULL,
11387 #if defined(__FreeBSD__)
11391 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11395 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11396 struct sctp_nets *net,
11399 /* formulate and SEND a SHUTDOWN-COMPLETE */
11400 struct mbuf *m_shutdown_comp;
11401 struct sctp_shutdown_complete_chunk *shutdown_complete;
11405 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11406 if (m_shutdown_comp == NULL) {
11410 if (reflect_vtag) {
11411 flags = SCTP_HAD_NO_TCB;
11412 vtag = stcb->asoc.my_vtag;
11415 vtag = stcb->asoc.peer_vtag;
11417 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11418 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11419 shutdown_complete->ch.chunk_flags = flags;
11420 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11421 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11422 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11423 (struct sockaddr *)&net->ro._l_addr,
11424 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11425 stcb->sctp_ep->sctp_lport, stcb->rport,
11428 #if defined(__FreeBSD__)
11431 SCTP_SO_NOT_LOCKED);
11432 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11436 #if defined(__FreeBSD__)
11438 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11439 struct sctphdr *sh, uint32_t vtag,
11440 uint8_t type, struct mbuf *cause,
11441 uint8_t use_mflowid, uint32_t mflowid,
11442 uint32_t vrf_id, uint16_t port)
11445 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11446 struct sctphdr *sh, uint32_t vtag,
11447 uint8_t type, struct mbuf *cause,
11448 uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11452 pakhandle_type o_pak;
11454 struct mbuf *o_pak;
11457 struct sctphdr *shout;
11458 struct sctp_chunkhdr *ch;
11459 #if defined(INET) || defined(INET6)
11460 struct udphdr *udp;
11463 int len, cause_len, padding_len;
11465 #if defined(__APPLE__) || defined(__Panda__)
11468 struct sockaddr_in *src_sin, *dst_sin;
11472 struct sockaddr_in6 *src_sin6, *dst_sin6;
11473 struct ip6_hdr *ip6;
11476 /* Compute the length of the cause and add final padding. */
11478 if (cause != NULL) {
11479 struct mbuf *m_at, *m_last = NULL;
11481 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11482 if (SCTP_BUF_NEXT(m_at) == NULL)
11484 cause_len += SCTP_BUF_LEN(m_at);
11486 padding_len = cause_len % 4;
11487 if (padding_len != 0) {
11488 padding_len = 4 - padding_len;
11490 if (padding_len != 0) {
11491 if (sctp_add_pad_tombuf(m_last, padding_len)) {
11492 sctp_m_freem(cause);
11499 /* Get an mbuf for the header. */
11500 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11501 switch (dst->sa_family) {
11504 len += sizeof(struct ip);
11509 len += sizeof(struct ip6_hdr);
11515 #if defined INET || defined INET6
11517 len += sizeof(struct udphdr);
11520 #if defined(__APPLE__)
11521 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11522 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11524 mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11527 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11529 if (mout == NULL) {
11531 sctp_m_freem(cause);
11535 #if defined(__APPLE__)
11536 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11537 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11539 SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11542 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11544 SCTP_BUF_LEN(mout) = len;
11545 SCTP_BUF_NEXT(mout) = cause;
11546 #if defined(__FreeBSD__)
11547 if (use_mflowid != 0) {
11548 mout->m_pkthdr.flowid = mflowid;
11549 mout->m_flags |= M_FLOWID;
11558 switch (dst->sa_family) {
11561 src_sin = (struct sockaddr_in *)src;
11562 dst_sin = (struct sockaddr_in *)dst;
11563 ip = mtod(mout, struct ip *);
11564 ip->ip_v = IPVERSION;
11565 ip->ip_hl = (sizeof(struct ip) >> 2);
11567 #if defined(__FreeBSD__)
11568 ip->ip_id = ip_newid();
11569 #elif defined(__APPLE__)
11571 ip->ip_id = ip_randomid();
11573 ip->ip_id = htons(ip_id++);
11576 ip->ip_id = htons(ip_id++);
11579 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11581 ip->ip_p = IPPROTO_UDP;
11583 ip->ip_p = IPPROTO_SCTP;
11585 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11586 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
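/* Addresses are swapped: the response is sent from the received packet's destination back to its source. */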
11588 len = sizeof(struct ip);
11589 shout = (struct sctphdr *)((caddr_t)ip + len);
11594 src_sin6 = (struct sockaddr_in6 *)src;
11595 dst_sin6 = (struct sockaddr_in6 *)dst;
11596 ip6 = mtod(mout, struct ip6_hdr *);
11597 ip6->ip6_flow = htonl(0x60000000);
11598 #if defined(__FreeBSD__)
11599 if (V_ip6_auto_flowlabel) {
11600 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11603 #if defined(__Userspace__)
11604 ip6->ip6_hlim = IPv6_HOP_LIMIT;
11606 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11609 ip6->ip6_nxt = IPPROTO_UDP;
11611 ip6->ip6_nxt = IPPROTO_SCTP;
11613 ip6->ip6_src = dst_sin6->sin6_addr;
11614 ip6->ip6_dst = src_sin6->sin6_addr;
11615 len = sizeof(struct ip6_hdr);
11616 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11621 shout = mtod(mout, struct sctphdr *);
11624 #if defined INET || defined INET6
11626 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11627 sctp_m_freem(mout);
11630 udp = (struct udphdr *)shout;
11631 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11632 udp->uh_dport = port;
11634 udp->uh_ulen = htons(sizeof(struct udphdr) +
11635 sizeof(struct sctphdr) +
11636 sizeof(struct sctp_chunkhdr) +
11637 cause_len + padding_len);
11638 len += sizeof(struct udphdr);
11639 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11644 shout->src_port = sh->dest_port;
11645 shout->dest_port = sh->src_port;
11646 shout->checksum = 0;
11648 shout->v_tag = htonl(vtag);
11650 shout->v_tag = sh->v_tag;
11652 len += sizeof(struct sctphdr);
11653 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11654 ch->chunk_type = type;
11656 ch->chunk_flags = 0;
11658 ch->chunk_flags = SCTP_HAD_NO_TCB;
11660 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11661 len += sizeof(struct sctp_chunkhdr);
11662 len += cause_len + padding_len;
11664 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11665 sctp_m_freem(mout);
11668 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11669 switch (dst->sa_family) {
11672 #if defined(__APPLE__) || defined(__Panda__)
11673 /* zap the stack pointer to the route */
11674 bzero(&ro, sizeof(sctp_route_t));
11675 #if defined(__Panda__)
11676 ro._l_addr.sa.sa_family = AF_INET;
11680 #if !defined(__Windows__) && !defined(__Userspace__)
11681 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
11683 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11688 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11694 #if defined(__FreeBSD__)
11695 #if __FreeBSD_version >= 1000000
11696 ip->ip_len = htons(len);
11700 #elif defined(__APPLE__) || defined(__Userspace__)
11703 ip->ip_len = htons(len);
11706 #if defined(SCTP_WITH_NO_CSUM)
11707 SCTP_STAT_INCR(sctps_sendnocrc);
11709 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11710 SCTP_STAT_INCR(sctps_sendswcrc);
11712 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
11714 SCTP_ENABLE_UDP_CSUM(o_pak);
11717 SCTP_ENABLE_UDP_CSUM(o_pak);
11720 #if defined(SCTP_WITH_NO_CSUM)
11721 SCTP_STAT_INCR(sctps_sendnocrc);
11723 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
11724 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11725 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11726 SCTP_STAT_INCR(sctps_sendhwcrc);
11728 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11729 SCTP_STAT_INCR(sctps_sendswcrc);
11733 #ifdef SCTP_PACKET_LOGGING
11734 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11735 sctp_packet_log(o_pak);
11738 #if defined(__APPLE__) || defined(__Panda__)
11739 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11740 /* Free the route if we got one back */
11746 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11752 ip6->ip6_plen = len - sizeof(struct ip6_hdr);
11754 #if defined(SCTP_WITH_NO_CSUM)
11755 SCTP_STAT_INCR(sctps_sendnocrc);
11757 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11758 SCTP_STAT_INCR(sctps_sendswcrc);
11760 #if defined(__Windows__)
11762 #elif !defined(__Userspace__)
11763 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11764 udp->uh_sum = 0xffff;
11768 #if defined(SCTP_WITH_NO_CSUM)
11769 SCTP_STAT_INCR(sctps_sendnocrc);
11771 #if defined(__FreeBSD__) && __FreeBSD_version >= 900000
11772 #if __FreeBSD_version > 901000
11773 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11775 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11777 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11778 SCTP_STAT_INCR(sctps_sendhwcrc);
11780 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
11781 SCTP_STAT_INCR(sctps_sendswcrc);
11785 #ifdef SCTP_PACKET_LOGGING
11786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11787 sctp_packet_log(o_pak);
11790 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11793 #if defined(__Userspace__)
11797 struct sockaddr_conn *sconn;
11799 sconn = (struct sockaddr_conn *)src;
11800 #if defined(SCTP_WITH_NO_CSUM)
11801 SCTP_STAT_INCR(sctps_sendnocrc);
11803 shout->checksum = sctp_calculate_cksum(mout, 0);
11804 SCTP_STAT_INCR(sctps_sendswcrc);
11806 #ifdef SCTP_PACKET_LOGGING
11807 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11808 sctp_packet_log(mout);
11811 /* Don't alloc/free for each packet */
11812 if ((buffer = malloc(len)) != NULL) {
11813 m_copydata(mout, 0, len, buffer);
11814 SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
11817 sctp_m_freem(mout);
11822 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11824 sctp_m_freem(mout);
11825 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11828 SCTP_STAT_INCR(sctps_sendpackets);
11829 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11830 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11835 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11836 struct sctphdr *sh,
11837 #if defined(__FreeBSD__)
11838 uint8_t use_mflowid, uint32_t mflowid,
11840 uint32_t vrf_id, uint16_t port)
11842 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11843 #if defined(__FreeBSD__)
11844 use_mflowid, mflowid,
11850 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked
11851 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11856 struct sctp_tmit_chunk *chk;
11857 struct sctp_heartbeat_chunk *hb;
11858 struct timeval now;
11860 SCTP_TCB_LOCK_ASSERT(stcb);
11864 (void)SCTP_GETTIME_TIMEVAL(&now);
11865 switch (net->ro._l_addr.sa.sa_family) {
11874 #if defined(__Userspace__)
11881 sctp_alloc_a_chunk(stcb, chk);
11883 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11887 chk->copy_by_ref = 0;
11888 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11889 chk->rec.chunk_id.can_take_data = 1;
11890 chk->asoc = &stcb->asoc;
11891 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11893 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11894 if (chk->data == NULL) {
11895 sctp_free_a_chunk(stcb, chk, so_locked);
11898 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11899 SCTP_BUF_LEN(chk->data) = chk->send_size;
11900 chk->sent = SCTP_DATAGRAM_UNSENT;
11901 chk->snd_count = 0;
11903 atomic_add_int(&chk->whoTo->ref_count, 1);
11904 /* Now we have an mbuf that we can fill in with the details */
11905 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11906 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11907 /* fill out chunk header */
11908 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11909 hb->ch.chunk_flags = 0;
11910 hb->ch.chunk_length = htons(chk->send_size);
11911 /* Fill out hb parameter */
11912 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11913 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11914 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11915 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
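/* The send time travels in the HB info so the RTT can be measured when the peer echoes it back in its HEARTBEAT-ACK. */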
11916 /* Did our user request this one, put it in */
11917 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11919 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11921 switch (net->ro._l_addr.sa.sa_family) {
11924 hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
11929 hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
11932 #if defined(__Userspace__)
11934 hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
11938 hb->heartbeat.hb_info.addr_len = 0;
11942 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11944 * we only take from the entropy pool if the address is not confirmed.
11947 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11948 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11950 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11951 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11953 switch (net->ro._l_addr.sa.sa_family) {
11956 memcpy(hb->heartbeat.hb_info.address,
11957 &net->ro._l_addr.sin.sin_addr,
11958 sizeof(net->ro._l_addr.sin.sin_addr));
11963 memcpy(hb->heartbeat.hb_info.address,
11964 &net->ro._l_addr.sin6.sin6_addr,
11965 sizeof(net->ro._l_addr.sin6.sin6_addr));
11968 #if defined(__Userspace__)
11970 memcpy(hb->heartbeat.hb_info.address,
11971 &net->ro._l_addr.sconn.sconn_addr,
11972 sizeof(net->ro._l_addr.sconn.sconn_addr));
11979 net->hb_responded = 0;
11980 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11981 stcb->asoc.ctrl_queue_cnt++;
11982 SCTP_STAT_INCR(sctps_sendheartbeat);
11987 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11990 struct sctp_association *asoc;
11991 struct sctp_ecne_chunk *ecne;
11992 struct sctp_tmit_chunk *chk;
11997 asoc = &stcb->asoc;
11998 SCTP_TCB_LOCK_ASSERT(stcb);
11999 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12000 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
12001 /* found a previous ECN_ECHO, update it if needed */
12002 uint32_t cnt, ctsn;
12003 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12004 ctsn = ntohl(ecne->tsn);
12005 if (SCTP_TSN_GT(high_tsn, ctsn)) {
12006 ecne->tsn = htonl(high_tsn);
12007 SCTP_STAT_INCR(sctps_queue_upd_ecne);
12009 cnt = ntohl(ecne->num_pkts_since_cwr);
12011 ecne->num_pkts_since_cwr = htonl(cnt);
12015 /* nope could not find one to update so we must build one */
12016 sctp_alloc_a_chunk(stcb, chk);
12020 chk->copy_by_ref = 0;
12021 SCTP_STAT_INCR(sctps_queue_upd_ecne);
12022 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12023 chk->rec.chunk_id.can_take_data = 0;
12024 chk->asoc = &stcb->asoc;
12025 chk->send_size = sizeof(struct sctp_ecne_chunk);
12026 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12027 if (chk->data == NULL) {
12028 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12031 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12032 SCTP_BUF_LEN(chk->data) = chk->send_size;
12033 chk->sent = SCTP_DATAGRAM_UNSENT;
12034 chk->snd_count = 0;
12036 atomic_add_int(&chk->whoTo->ref_count, 1);
12038 stcb->asoc.ecn_echo_cnt_onq++;
12039 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12040 ecne->ch.chunk_type = SCTP_ECN_ECHO;
12041 ecne->ch.chunk_flags = 0;
12042 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12043 ecne->tsn = htonl(high_tsn);
12044 ecne->num_pkts_since_cwr = htonl(1);
12045 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12046 asoc->ctrl_queue_cnt++;
12050 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
12051 struct mbuf *m, int len, int iphlen, int bad_crc)
12053 struct sctp_association *asoc;
12054 struct sctp_pktdrop_chunk *drp;
12055 struct sctp_tmit_chunk *chk;
12061 struct sctp_chunkhdr *ch, chunk_buf;
12062 unsigned int chk_length;
12067 asoc = &stcb->asoc;
12068 SCTP_TCB_LOCK_ASSERT(stcb);
12069 if (asoc->peer_supports_pktdrop == 0) {
12071 * peer must declare support before I send one.
12075 if (stcb->sctp_socket == NULL) {
12078 sctp_alloc_a_chunk(stcb, chk);
12082 chk->copy_by_ref = 0;
12084 chk->send_size = len;
12085 /* Validate that we do not have an ABORT in here. */
12086 offset = iphlen + sizeof(struct sctphdr);
12087 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12088 sizeof(*ch), (uint8_t *) & chunk_buf);
12089 while (ch != NULL) {
12090 chk_length = ntohs(ch->chunk_length);
12091 if (chk_length < sizeof(*ch)) {
12092 /* break to abort land */
12095 switch (ch->chunk_type) {
12096 case SCTP_PACKET_DROPPED:
12097 case SCTP_ABORT_ASSOCIATION:
12098 case SCTP_INITIATION_ACK:
12100 * We don't respond with a PKT-DROP to an ABORT
12101 * or PKT-DROP. We also do not respond to an
12102 * INIT-ACK, because we can't know if the initiation
12103 * tag is correct or not.
12105 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12110 offset += SCTP_SIZE32(chk_length);
12111 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12112 sizeof(*ch), (uint8_t *) & chunk_buf);
12115 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12116 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
12117 /* only send 1 mtu worth, trim off the
12118 * excess on the end.
12121 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12124 chk->asoc = &stcb->asoc;
12125 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12126 if (chk->data == NULL) {
12128 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12131 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12132 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12134 sctp_m_freem(chk->data);
12138 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12139 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12140 chk->book_size_scale = 0;
12142 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12143 drp->trunc_len = htons(fullsz);
12144 /* Len is already adjusted to size minus overhead above;
12145 * take out the pkt_drop chunk itself from it.
12147 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
12148 len = chk->send_size;
12150 /* no truncation needed */
12151 drp->ch.chunk_flags = 0;
12152 drp->trunc_len = htons(0);
12155 drp->ch.chunk_flags |= SCTP_BADCRC;
12157 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12158 SCTP_BUF_LEN(chk->data) = chk->send_size;
12159 chk->sent = SCTP_DATAGRAM_UNSENT;
12160 chk->snd_count = 0;
12162 /* we should hit here */
12164 atomic_add_int(&chk->whoTo->ref_count, 1);
12168 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12169 chk->rec.chunk_id.can_take_data = 1;
12170 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12171 drp->ch.chunk_length = htons(chk->send_size);
12172 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12176 drp->bottle_bw = htonl(spc);
12177 if (asoc->my_rwnd) {
12178 drp->current_onq = htonl(asoc->size_on_reasm_queue +
12179 asoc->size_on_all_streams +
12180 asoc->my_rwnd_control_len +
12181 stcb->sctp_socket->so_rcv.sb_cc);
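/*
 * current_onq reports how much receive-side space is currently in use:
 * reassembly and stream queues, control overhead, and data still
 * sitting in the socket receive buffer.
 */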
12184 * If my rwnd is 0, possibly from mbuf depletion as well as
12185 * space used, tell the peer there is NO space aka onq == bw
12187 drp->current_onq = htonl(spc);
12191 m_copydata(m, iphlen, len, (caddr_t)datap);
12192 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12193 asoc->ctrl_queue_cnt++;
12197 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12199 struct sctp_association *asoc;
12200 struct sctp_cwr_chunk *cwr;
12201 struct sctp_tmit_chunk *chk;
12203 SCTP_TCB_LOCK_ASSERT(stcb);
12207 asoc = &stcb->asoc;
12208 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12209 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12210 /* found a previous CWR queued to the same destination, update it if needed */
12212 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12213 ctsn = ntohl(cwr->tsn);
12214 if (SCTP_TSN_GT(high_tsn, ctsn)) {
12215 cwr->tsn = htonl(high_tsn);
12217 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12218 /* Make sure override is carried */
12219 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12224 sctp_alloc_a_chunk(stcb, chk);
12228 chk->copy_by_ref = 0;
12229 chk->rec.chunk_id.id = SCTP_ECN_CWR;
12230 chk->rec.chunk_id.can_take_data = 1;
12231 chk->asoc = &stcb->asoc;
12232 chk->send_size = sizeof(struct sctp_cwr_chunk);
12233 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12234 if (chk->data == NULL) {
12235 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12238 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12239 SCTP_BUF_LEN(chk->data) = chk->send_size;
12240 chk->sent = SCTP_DATAGRAM_UNSENT;
12241 chk->snd_count = 0;
12243 atomic_add_int(&chk->whoTo->ref_count, 1);
12244 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12245 cwr->ch.chunk_type = SCTP_ECN_CWR;
12246 cwr->ch.chunk_flags = override;
12247 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12248 cwr->tsn = htonl(high_tsn);
12249 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12250 asoc->ctrl_queue_cnt++;
12254 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
12255 int number_entries, uint16_t * list,
12256 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12258 uint16_t len, old_len, i;
12259 struct sctp_stream_reset_out_request *req_out;
12260 struct sctp_chunkhdr *ch;
12262 ch = mtod(chk->data, struct sctp_chunkhdr *);
12263 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12265 /* get to new offset for the param. */
12266 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12267 /* now how long will this param be? */
12268 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12269 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12270 req_out->ph.param_length = htons(len);
12271 req_out->request_seq = htonl(seq);
12272 req_out->response_seq = htonl(resp_seq);
12273 req_out->send_reset_at_tsn = htonl(last_sent);
12274 if (number_entries) {
12275 for (i = 0; i < number_entries; i++) {
12276 req_out->list_of_streams[i] = htons(list[i]);
12279 if (SCTP_SIZE32(len) > len) {
12281 * Need to worry about the pad we may end up adding to the
12282 * end. This is easy since the struct is either aligned to 4
12283 * bytes or 2 bytes off.
12285 req_out->list_of_streams[number_entries] = 0;
12287 /* now fix the chunk length */
12288 ch->chunk_length = htons(len + old_len);
12289 chk->book_size = len + old_len;
12290 chk->book_size_scale = 0;
12291 chk->send_size = SCTP_SIZE32(chk->book_size);
12292 SCTP_BUF_LEN(chk->data) = chk->send_size;
12297 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12298 int number_entries, uint16_t *list,
12301 uint16_t len, old_len, i;
12302 struct sctp_stream_reset_in_request *req_in;
12303 struct sctp_chunkhdr *ch;
12305 ch = mtod(chk->data, struct sctp_chunkhdr *);
12306 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12308 /* get to new offset for the param. */
12309 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12310 /* now how long will this param be? */
12311 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12312 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12313 req_in->ph.param_length = htons(len);
12314 req_in->request_seq = htonl(seq);
12315 if (number_entries) {
12316 for (i = 0; i < number_entries; i++) {
12317 req_in->list_of_streams[i] = htons(list[i]);
12320 if (SCTP_SIZE32(len) > len) {
12322 * Need to worry about the pad we may end up adding to the
12323 * end. This is easy since the struct is either aligned to 4
12324 * bytes or 2 bytes off.
12326 req_in->list_of_streams[number_entries] = 0;
12328 /* now fix the chunk length */
12329 ch->chunk_length = htons(len + old_len);
12330 chk->book_size = len + old_len;
12331 chk->book_size_scale = 0;
12332 chk->send_size = SCTP_SIZE32(chk->book_size);
12333 SCTP_BUF_LEN(chk->data) = chk->send_size;
12338 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12341 uint16_t len, old_len;
12342 struct sctp_stream_reset_tsn_request *req_tsn;
12343 struct sctp_chunkhdr *ch;
12345 ch = mtod(chk->data, struct sctp_chunkhdr *);
12346 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12348 /* get to new offset for the param. */
12349 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12350 /* now how long will this param be? */
12351 len = sizeof(struct sctp_stream_reset_tsn_request);
12352 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12353 req_tsn->ph.param_length = htons(len);
12354 req_tsn->request_seq = htonl(seq);
12356 /* now fix the chunk length */
12357 ch->chunk_length = htons(len + old_len);
12358 chk->send_size = len + old_len;
12359 chk->book_size = SCTP_SIZE32(chk->send_size);
12360 chk->book_size_scale = 0;
12361 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12366 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12367 uint32_t resp_seq, uint32_t result)
12369 uint16_t len, old_len;
12370 struct sctp_stream_reset_response *resp;
12371 struct sctp_chunkhdr *ch;
12373 ch = mtod(chk->data, struct sctp_chunkhdr *);
12374 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12376 /* get to new offset for the param. */
12377 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12378 /* now how long will this param be? */
12379 len = sizeof(struct sctp_stream_reset_response);
12380 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12381 resp->ph.param_length = htons(len);
12382 resp->response_seq = htonl(resp_seq);
12383 resp->result = htonl(result);
12385 /* now fix the chunk length */
12386 ch->chunk_length = htons(len + old_len);
12387 chk->book_size = len + old_len;
12388 chk->book_size_scale = 0;
12389 chk->send_size = SCTP_SIZE32(chk->book_size);
12390 SCTP_BUF_LEN(chk->data) = chk->send_size;
12395 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12396 uint32_t resp_seq, uint32_t result,
12397 uint32_t send_una, uint32_t recv_next)
12399 uint16_t len, old_len;
12400 struct sctp_stream_reset_response_tsn *resp;
12401 struct sctp_chunkhdr *ch;
12403 ch = mtod(chk->data, struct sctp_chunkhdr *);
12404 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12406 /* get to new offset for the param. */
12407 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12408 /* now how long will this param be? */
12409 len = sizeof(struct sctp_stream_reset_response_tsn);
12410 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12411 resp->ph.param_length = htons(len);
12412 resp->response_seq = htonl(resp_seq);
12413 resp->result = htonl(result);
12414 resp->senders_next_tsn = htonl(send_una);
12415 resp->receivers_next_tsn = htonl(recv_next);
12417 /* now fix the chunk length */
12418 ch->chunk_length = htons(len + old_len);
12419 chk->book_size = len + old_len;
12420 chk->send_size = SCTP_SIZE32(chk->book_size);
12421 chk->book_size_scale = 0;
12422 SCTP_BUF_LEN(chk->data) = chk->send_size;
12427 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12431 uint16_t len, old_len;
12432 struct sctp_chunkhdr *ch;
12433 struct sctp_stream_reset_add_strm *addstr;
12435 ch = mtod(chk->data, struct sctp_chunkhdr *);
12436 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12438 /* get to new offset for the param. */
12439 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12440 /* now how long will this param be? */
12441 len = sizeof(struct sctp_stream_reset_add_strm);
12444 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12445 addstr->ph.param_length = htons(len);
12446 addstr->request_seq = htonl(seq);
12447 addstr->number_of_streams = htons(adding);
12448 addstr->reserved = 0;
12450 /* now fix the chunk length */
12451 ch->chunk_length = htons(len + old_len);
12452 chk->send_size = len + old_len;
12453 chk->book_size = SCTP_SIZE32(chk->send_size);
12454 chk->book_size_scale = 0;
12455 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12460 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12464 uint16_t len, old_len;
12465 struct sctp_chunkhdr *ch;
12466 struct sctp_stream_reset_add_strm *addstr;
12468 ch = mtod(chk->data, struct sctp_chunkhdr *);
12469 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12471 /* get to new offset for the param. */
12472 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12473 /* now how long will this param be? */
12474 len = sizeof(struct sctp_stream_reset_add_strm);
12476 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12477 addstr->ph.param_length = htons(len);
12478 addstr->request_seq = htonl(seq);
12479 addstr->number_of_streams = htons(adding);
12480 addstr->reserved = 0;
12482 /* now fix the chunk length */
12483 ch->chunk_length = htons(len + old_len);
12484 chk->send_size = len + old_len;
12485 chk->book_size = SCTP_SIZE32(chk->send_size);
12486 chk->book_size_scale = 0;
12487 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12492 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12493 int number_entries, uint16_t *list,
12494 uint8_t send_out_req,
12495 uint8_t send_in_req,
12496 uint8_t send_tsn_req,
12497 uint8_t add_stream,
12499 uint16_t adding_i, uint8_t peer_asked)
12502 struct sctp_association *asoc;
12503 struct sctp_tmit_chunk *chk;
12504 struct sctp_chunkhdr *ch;
12507 asoc = &stcb->asoc;
12508 if (asoc->stream_reset_outstanding) {
12510 * Already one pending, must get ACK back to clear the flag.
12512 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12515 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
12516 (add_stream == 0)) {
12517 /* nothing to do */
12518 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12521 if (send_tsn_req && (send_out_req || send_in_req)) {
12522 /* error, can't do that */
12523 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12526 sctp_alloc_a_chunk(stcb, chk);
12528 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12531 chk->copy_by_ref = 0;
12532 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12533 chk->rec.chunk_id.can_take_data = 0;
12534 chk->asoc = &stcb->asoc;
12535 chk->book_size = sizeof(struct sctp_chunkhdr);
12536 chk->send_size = SCTP_SIZE32(chk->book_size);
12537 chk->book_size_scale = 0;
12539 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12540 if (chk->data == NULL) {
12541 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12542 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12545 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12547 /* setup chunk parameters */
12548 chk->sent = SCTP_DATAGRAM_UNSENT;
12549 chk->snd_count = 0;
12550 if (stcb->asoc.alternate) {
12551 chk->whoTo = stcb->asoc.alternate;
12553 chk->whoTo = stcb->asoc.primary_destination;
12555 atomic_add_int(&chk->whoTo->ref_count, 1);
12556 ch = mtod(chk->data, struct sctp_chunkhdr *);
12557 ch->chunk_type = SCTP_STREAM_RESET;
12558 ch->chunk_flags = 0;
12559 ch->chunk_length = htons(chk->book_size);
12560 SCTP_BUF_LEN(chk->data) = chk->send_size;
12562 seq = stcb->asoc.str_reset_seq_out;
12563 if (send_out_req) {
12564 sctp_add_stream_reset_out(chk, number_entries, list,
12565 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12566 asoc->stream_reset_out_is_outstanding = 1;
12568 asoc->stream_reset_outstanding++;
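/*
 * Each request parameter added to this chunk bumps
 * stream_reset_outstanding; a response from the peer must clear it
 * before another request can be issued (see the EBUSY check above).
 */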
12570 if ((add_stream & 1) &&
12571 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12572 /* Need to allocate more */
12573 struct sctp_stream_out *oldstream;
12574 struct sctp_stream_queue_pending *sp, *nsp;
12577 oldstream = stcb->asoc.strmout;
12578 /* get some more */
12579 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12580 ((stcb->asoc.streamoutcnt+adding_o) * sizeof(struct sctp_stream_out)),
12582 if (stcb->asoc.strmout == NULL) {
12584 stcb->asoc.strmout = oldstream;
12585 /* Turn off the bit */
12586 x = add_stream & 0xfe;
12590 /* Ok now we proceed with copying the old out stuff and
12591 * initializing the new stuff.
12593 SCTP_TCB_SEND_LOCK(stcb);
12594 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12595 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12596 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12597 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12598 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
12599 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12600 stcb->asoc.strmout[i].stream_no = i;
12601 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
12602 /* now anything on those queues? */
12603 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12604 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12605 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12607 /* Now move assoc pointers too */
12608 if (stcb->asoc.last_out_stream == &oldstream[i]) {
12609 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
12611 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
12612 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
12615 /* now the new streams */
12616 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12617 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12618 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12619 stcb->asoc.strmout[i].chunks_on_queues = 0;
12620 stcb->asoc.strmout[i].next_sequence_send = 0x0;
12621 stcb->asoc.strmout[i].stream_no = i;
12622 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12623 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
12625 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12626 SCTP_FREE(oldstream, SCTP_M_STRMO);
12627 SCTP_TCB_SEND_UNLOCK(stcb);
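/*
 * The old stream array is freed only after all queued chunks and the
 * scheduler's per-stream state have been migrated to the new, larger
 * array, all while holding the send lock.
 */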
12630 if ((add_stream & 1) && (adding_o > 0)) {
12631 asoc->strm_pending_add_size = adding_o;
12632 asoc->peer_req_out = peer_asked;
12633 sctp_add_an_out_stream(chk, seq, adding_o);
12635 asoc->stream_reset_outstanding++;
12637 if ((add_stream & 2) && (adding_i > 0)) {
12638 sctp_add_an_in_stream(chk, seq, adding_i);
12640 asoc->stream_reset_outstanding++;
12643 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12645 asoc->stream_reset_outstanding++;
12647 if (send_tsn_req) {
12648 sctp_add_stream_reset_tsn(chk, seq);
12649 asoc->stream_reset_outstanding++;
12651 asoc->str_reset = chk;
12652 /* insert the chunk for sending */
12653 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12656 asoc->ctrl_queue_cnt++;
12657 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12662 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12663 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12664 #if defined(__FreeBSD__)
12665 uint8_t use_mflowid, uint32_t mflowid,
12667 uint32_t vrf_id, uint16_t port)
12669 /* Don't respond to an ABORT with an ABORT. */
12670 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12672 sctp_m_freem(cause);
12675 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12676 #if defined(__FreeBSD__)
12677 use_mflowid, mflowid,
12684 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12685 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12686 #if defined(__FreeBSD__)
12687 uint8_t use_mflowid, uint32_t mflowid,
12689 uint32_t vrf_id, uint16_t port)
12691 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12692 #if defined(__FreeBSD__)
12693 use_mflowid, mflowid,
12699 static struct mbuf *
12700 sctp_copy_resume(struct uio *uio,
12702 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
12703 int user_marks_eor,
12707 struct mbuf **new_tail)
12709 #if defined(__Panda__)
12712 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12713 (user_marks_eor ? M_EOR : 0));
12715 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12718 *sndout = m_length(m, NULL);
12719 *new_tail = m_last(m);
12722 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000
12725 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12726 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12728 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12731 *sndout = m_length(m, NULL);
12732 *new_tail = m_last(m);
12736 int left, cancpy, willcpy;
12737 struct mbuf *m, *head;
12739 #if defined(__APPLE__)
12740 #if defined(APPLE_LEOPARD)
12741 left = min(uio->uio_resid, max_send_len);
12743 left = min(uio_resid(uio), max_send_len);
12746 left = min(uio->uio_resid, max_send_len);
12748 /* Always get a header just in case */
12749 head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
12750 if (head == NULL) {
12751 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12755 cancpy = M_TRAILINGSPACE(head);
12756 willcpy = min(cancpy, left);
12757 *error = uiomove(mtod(head, caddr_t), willcpy, uio);
12759 sctp_m_freem(head);
12762 *sndout += willcpy;
12764 SCTP_BUF_LEN(head) = willcpy;
12768 /* move in user data */
12769 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
12770 if (SCTP_BUF_NEXT(m) == NULL) {
12771 sctp_m_freem(head);
12773 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12777 m = SCTP_BUF_NEXT(m);
12778 cancpy = M_TRAILINGSPACE(m);
12779 willcpy = min(cancpy, left);
12780 *error = uiomove(mtod(m, caddr_t), willcpy, uio);
12782 sctp_m_freem(head);
12784 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12788 SCTP_BUF_LEN(m) = willcpy;
12790 *sndout += willcpy;
12793 SCTP_BUF_NEXT(m) = NULL;
12801 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12806 #if defined(__Panda__)
12808 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12810 if (sp->data == NULL) {
12811 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12815 sp->tail_mbuf = m_last(sp->data);
12818 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000
12820 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12822 if (sp->data == NULL) {
12823 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12827 sp->tail_mbuf = m_last(sp->data);
12830 int cancpy, willcpy, error;
12831 struct mbuf *m, *head;
12834 /* First one gets a header */
12836 head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
12838 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12842 * Add this one for m in now, that way if the alloc fails we won't
12845 SCTP_BUF_RESV_UF(m, resv_upfront);
12846 cancpy = M_TRAILINGSPACE(m);
12847 willcpy = min(cancpy, left);
12849 /* move in user data */
12850 error = uiomove(mtod(m, caddr_t), willcpy, uio);
12852 sctp_m_freem(head);
12855 SCTP_BUF_LEN(m) = willcpy;
12859 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
12860 if (SCTP_BUF_NEXT(m) == NULL) {
12862 * the head goes back to caller, he can free
12865 sctp_m_freem(head);
12866 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12869 m = SCTP_BUF_NEXT(m);
12870 cancpy = M_TRAILINGSPACE(m);
12871 willcpy = min(cancpy, left);
12874 SCTP_BUF_NEXT(m) = NULL;
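	/*
	 * Summary of the fallback path above: when m_uiotombuf() is not
	 * available, user data is pulled in chunk by chunk.  Each pass
	 * allocates an mbuf sized for what is left, copies
	 * min(M_TRAILINGSPACE(m), left) bytes with uiomove(), and chains the
	 * new mbuf onto the tail until left reaches zero.
	 */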
12885 static struct sctp_stream_queue_pending *
12886 sctp_copy_it_in(struct sctp_tcb *stcb,
12887 struct sctp_association *asoc,
12888 struct sctp_sndrcvinfo *srcv,
12890 struct sctp_nets *net,
12892 int user_marks_eor,
12897 * This routine must be very careful in its work. Protocol
12898 * processing is up and running so care must be taken to spl...()
12899	 * when you need to do something that may affect the stcb/asoc. The
12900 * sb is locked however. When data is copied the protocol processing
12901 * should be enabled since this is a slower operation...
12903 struct sctp_stream_queue_pending *sp = NULL;
12907 /* Now can we send this? */
12908 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12909 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12910 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12911 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12912 /* got data while shutting down */
12913 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12914 *error = ECONNRESET;
12917 sctp_alloc_a_strmoq(stcb, sp);
12919 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12924 sp->sender_all_done = 0;
12925 sp->sinfo_flags = srcv->sinfo_flags;
12926 sp->timetolive = srcv->sinfo_timetolive;
12927 sp->ppid = srcv->sinfo_ppid;
12928 sp->context = srcv->sinfo_context;
12929 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12931 sp->stream = srcv->sinfo_stream;
12932 #if defined(__APPLE__)
12933 #if defined(APPLE_LEOPARD)
12934 sp->length = min(uio->uio_resid, max_send_len);
12936 sp->length = min(uio_resid(uio), max_send_len);
12939 sp->length = min(uio->uio_resid, max_send_len);
12941 #if defined(__APPLE__)
12942 #if defined(APPLE_LEOPARD)
12943 if ((sp->length == (uint32_t)uio->uio_resid) &&
12945 if ((sp->length == (uint32_t)uio_resid(uio)) &&
12948 if ((sp->length == (uint32_t)uio->uio_resid) &&
12950 ((user_marks_eor == 0) ||
12951 (srcv->sinfo_flags & SCTP_EOF) ||
12952 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12953 sp->msg_is_complete = 1;
12955 sp->msg_is_complete = 0;
12957 sp->sender_all_done = 0;
12958 sp->some_taken = 0;
12959 sp->put_last_out = 0;
12960 resv_in_first = sizeof(struct sctp_data_chunk);
12961 sp->data = sp->tail_mbuf = NULL;
12962 if (sp->length == 0) {
12966 if (srcv->sinfo_keynumber_valid) {
12967 sp->auth_keyid = srcv->sinfo_keynumber;
12969 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12971 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12972 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12973 sp->holds_key_ref = 1;
12975 #if defined(__APPLE__)
12976 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
12978 *error = sctp_copy_one(sp, uio, resv_in_first);
12979 #if defined(__APPLE__)
12980 SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
12984 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12987 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12989 atomic_add_int(&sp->net->ref_count, 1);
12993 sctp_set_prsctp_policy(sp);
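	/*
	 * The sinfo fields copied above (timetolive, ppid, stream, flags) are
	 * what sctp_set_prsctp_policy() works from.  A minimal userland sketch,
	 * assuming the libsctp helper sctp_sendmsg() and PR-SCTP support
	 * negotiated on the association:
	 *
	 *   #include <netinet/sctp.h>
	 *
	 *   // Give the message a 500 ms lifetime; if it cannot be sent in
	 *   // time, the stack may abandon it instead of retransmitting.
	 *   ssize_t n = sctp_sendmsg(sd, buf, len, NULL, 0,
	 *                            htonl(PPID), 0, STREAM_NO,
	 *                            500, 0);
	 *
	 * PPID and STREAM_NO are placeholders for application-chosen values.
	 */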
13001 sctp_sosend(struct socket *so,
13002 struct sockaddr *addr,
13005 pakhandle_type top,
13006 pakhandle_type icontrol,
13009 struct mbuf *control,
13011 #if defined(__APPLE__) || defined(__Panda__)
13015 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13017 #elif defined(__Windows__)
13020 #if defined(__Userspace__)
13022 * proc is a dummy in __Userspace__ and will not be passed
13023 * to sctp_lower_sosend
13032 struct mbuf *control = NULL;
13034 #if defined(__APPLE__)
13035 struct proc *p = current_proc();
13037 int error, use_sndinfo = 0;
13038 struct sctp_sndrcvinfo sndrcvninfo;
13039 struct sockaddr *addr_to_use;
13040 #if defined(INET) && defined(INET6)
13041 struct sockaddr_in sin;
13044 #if defined(__APPLE__)
13045 SCTP_SOCKET_LOCK(so, 1);
13048 control = SCTP_HEADER_TO_CHAIN(icontrol);
13051 /* process cmsg snd/rcv info (maybe a assoc-id) */
13052 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
13053 sizeof(sndrcvninfo))) {
13058 addr_to_use = addr;
13059 #if defined(INET) && defined(INET6)
13060 if ((addr) && (addr->sa_family == AF_INET6)) {
13061 struct sockaddr_in6 *sin6;
13063 sin6 = (struct sockaddr_in6 *)addr;
13064 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13065 in6_sin6_2_sin(&sin, sin6);
13066 addr_to_use = (struct sockaddr *)&sin;
13070 error = sctp_lower_sosend(so, addr_to_use, uio, top,
13077 use_sndinfo ? &sndrcvninfo: NULL
13078 #if !(defined(__Panda__) || defined(__Userspace__))
13082 #if defined(__APPLE__)
13083 SCTP_SOCKET_UNLOCK(so, 1);
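/*
 * sctp_find_cmsg() above digs a struct sctp_sndrcvinfo out of the control
 * chain.  A hedged sketch of how userland typically supplies it (classic
 * SCTP_SNDRCV ancillary data, per RFC 6458):
 *
 *   #include <netinet/sctp.h>
 *
 *   char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *   struct iovec iov = { buf, len };
 *   struct msghdr mh;
 *   struct cmsghdr *cmsg;
 *   struct sctp_sndrcvinfo *sinfo;
 *
 *   memset(&mh, 0, sizeof(mh));
 *   memset(cbuf, 0, sizeof(cbuf));
 *   mh.msg_iov = &iov;
 *   mh.msg_iovlen = 1;
 *   mh.msg_control = cbuf;
 *   mh.msg_controllen = sizeof(cbuf);
 *   cmsg = CMSG_FIRSTHDR(&mh);
 *   cmsg->cmsg_level = IPPROTO_SCTP;
 *   cmsg->cmsg_type = SCTP_SNDRCV;
 *   cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *   sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *   sinfo->sinfo_stream = 1;        // send on stream 1
 *   sendmsg(sd, &mh, 0);
 */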
13090 sctp_lower_sosend(struct socket *so,
13091 struct sockaddr *addr,
13094 pakhandle_type i_pak,
13095 pakhandle_type i_control,
13097 struct mbuf *i_pak,
13098 struct mbuf *control,
13101 struct sctp_sndrcvinfo *srcv
13102 #if !(defined( __Panda__) || defined(__Userspace__))
13104 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13106 #elif defined(__Windows__)
13114 unsigned int sndlen = 0, max_len;
13116 struct mbuf *top = NULL;
13118 struct mbuf *control = NULL;
13120 int queue_only = 0, queue_only_for_init = 0;
13121 int free_cnt_applied = 0;
13123 int now_filled = 0;
13124 unsigned int inqueue_bytes = 0;
13125 struct sctp_block_entry be;
13126 struct sctp_inpcb *inp;
13127 struct sctp_tcb *stcb = NULL;
13128 struct timeval now;
13129 struct sctp_nets *net;
13130 struct sctp_association *asoc;
13131 struct sctp_inpcb *t_inp;
13132 int user_marks_eor;
13133 int create_lock_applied = 0;
13134 int nagle_applies = 0;
13135 int some_on_control = 0;
13136 int got_all_of_the_send = 0;
13137 int hold_tcblock = 0;
13138 int non_blocking = 0;
13139 uint32_t local_add_more, local_soresv = 0;
13141 uint16_t sinfo_flags;
13142 sctp_assoc_t sinfo_assoc_id;
13149 #if defined(__APPLE__)
13150 sctp_lock_assert(so);
13152 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13154 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13157 SCTP_RELEASE_PKT(i_pak);
13161 if ((uio == NULL) && (i_pak == NULL)) {
13162 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13165 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13166 atomic_add_int(&inp->total_sends, 1);
13168 #if defined(__APPLE__)
13169 #if defined(APPLE_LEOPARD)
13170 if (uio->uio_resid < 0) {
13172 if (uio_resid(uio) < 0) {
13175 if (uio->uio_resid < 0) {
13177 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13180 #if defined(__APPLE__)
13181 #if defined(APPLE_LEOPARD)
13182 sndlen = uio->uio_resid;
13184 sndlen = uio_resid(uio);
13187 sndlen = uio->uio_resid;
13190 top = SCTP_HEADER_TO_CHAIN(i_pak);
13193		 * app len indicates the data length; dgsize for the
13194		 * SCTP_EOF/ABORT cases will not have the right length
13196 sndlen = SCTP_APP_DATA_LEN(i_pak);
13198 * Set the particle len also to zero to match
13199 * up with app len. We only have one particle
13200 * if app len is zero for Panda. This is ensured
13201 * in the socket lib
13204 SCTP_BUF_LEN(top) = 0;
13207 * We delink the chain from header, but keep
13208 * the header around as we will need it in
13211 SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
13213 sndlen = SCTP_HEADER_LEN(i_pak);
13216 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
13221 control = SCTP_HEADER_TO_CHAIN(i_control);
13224 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13225 (inp->sctp_socket->so_qlimit)) {
13226 /* The listener can NOT send */
13227 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13232	 * Pre-screen the address; if one is given, the sin_len
13233	 * must be set correctly!
13236 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13237 switch (raddr->sa.sa_family) {
13240 #ifdef HAVE_SIN_LEN
13241 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13242 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13247 port = raddr->sin.sin_port;
13252 #ifdef HAVE_SIN6_LEN
13253 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13254 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13259 port = raddr->sin6.sin6_port;
13262 #if defined(__Userspace__)
13264 #ifdef HAVE_SCONN_LEN
13265 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13266 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13271 port = raddr->sconn.sconn_port;
13275 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13276 error = EAFNOSUPPORT;
13283 sinfo_flags = srcv->sinfo_flags;
13284 sinfo_assoc_id = srcv->sinfo_assoc_id;
13285 if (INVALID_SINFO_FLAG(sinfo_flags) ||
13286 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13287 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13291 if (srcv->sinfo_flags)
13292 SCTP_STAT_INCR(sctps_sends_with_flags);
13294 sinfo_flags = inp->def_send.sinfo_flags;
13295 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13297 if (sinfo_flags & SCTP_SENDALL) {
13298		/* it's a sendall */
13299 error = sctp_sendall(inp, uio, top, srcv);
13303 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13304 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13308 /* now we must find the assoc */
13309 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13310 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13311 SCTP_INP_RLOCK(inp);
13312 stcb = LIST_FIRST(&inp->sctp_asoc_list);
13314 SCTP_TCB_LOCK(stcb);
13317 SCTP_INP_RUNLOCK(inp);
13318 } else if (sinfo_assoc_id) {
13319 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
13322 * Since we did not use findep we must
13323 * increment it, and if we don't find a tcb
13326 SCTP_INP_WLOCK(inp);
13327 SCTP_INP_INCR_REF(inp);
13328 SCTP_INP_WUNLOCK(inp);
13329 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13330 if (stcb == NULL) {
13331 SCTP_INP_WLOCK(inp);
13332 SCTP_INP_DECR_REF(inp);
13333 SCTP_INP_WUNLOCK(inp);
13338 if ((stcb == NULL) && (addr)) {
13339 /* Possible implicit send? */
13340 SCTP_ASOC_CREATE_LOCK(inp);
13341 create_lock_applied = 1;
13342 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13343 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13344 /* Should I really unlock ? */
13345 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13350 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13351 (addr->sa_family == AF_INET6)) {
13352 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13356 SCTP_INP_WLOCK(inp);
13357 SCTP_INP_INCR_REF(inp);
13358 SCTP_INP_WUNLOCK(inp);
13359 /* With the lock applied look again */
13360 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13361 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13362 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13364 if (stcb == NULL) {
13365 SCTP_INP_WLOCK(inp);
13366 SCTP_INP_DECR_REF(inp);
13367 SCTP_INP_WUNLOCK(inp);
13374 if (t_inp != inp) {
13375 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13380 if (stcb == NULL) {
13381 if (addr == NULL) {
13382 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13386 /* We must go ahead and start the INIT process */
13389 if ((sinfo_flags & SCTP_ABORT) ||
13390 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13392			 * User asks to abort a non-existent assoc,
13393			 * or EOF a non-existent assoc with no data
13395 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13399 /* get an asoc/stcb struct */
13400 vrf_id = inp->def_vrf_id;
13402 if (create_lock_applied == 0) {
13403 panic("Error, should hold create lock and I don't?");
13406 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13407 #if !(defined( __Panda__) || defined(__Userspace__))
13410 (struct proc *)NULL
13413 if (stcb == NULL) {
13414 /* Error is setup for us in the call */
13417 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13418 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13419 /* Set the connected flag so we can queue data */
13420 soisconnecting(so);
13423 if (create_lock_applied) {
13424 SCTP_ASOC_CREATE_UNLOCK(inp);
13425 create_lock_applied = 0;
13427 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13429 /* Turn on queue only flag to prevent data from being sent */
13431 asoc = &stcb->asoc;
13432 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13433 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13435 /* initialize authentication params for the assoc */
13436 sctp_initialize_auth_params(inp, stcb);
13439 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13440 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
13446 /* out with the INIT */
13447 queue_only_for_init = 1;
13449 * we may want to dig in after this call and adjust the MTU
13450 * value. It defaulted to 1500 (constant) but the ro
13451 * structure may now have an update and thus we may need to
13452 * change it BEFORE we append the message.
13456 asoc = &stcb->asoc;
13458 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13459 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
13461 net = sctp_findnet(stcb, addr);
13464 if ((net == NULL) ||
13465 ((port != 0) && (port != stcb->rport))) {
13466 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13471 if (stcb->asoc.alternate) {
13472 net = stcb->asoc.alternate;
13474 net = stcb->asoc.primary_destination;
13477 atomic_add_int(&stcb->total_sends, 1);
13478 /* Keep the stcb from being freed under our feet */
13479 atomic_add_int(&asoc->refcnt, 1);
13480 free_cnt_applied = 1;
13482 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13483 if (sndlen > asoc->smallest_mtu) {
13484 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13489 #if defined(__Userspace__)
13490 if (inp->recv_callback) {
13494 if (SCTP_SO_IS_NBIO(so)
13495 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13496 || (flags & MSG_NBIO)
13502 /* would we block? */
13503 if (non_blocking) {
13504 if (hold_tcblock == 0) {
13505 SCTP_TCB_LOCK(stcb);
13508 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13509 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13510 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13511 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13512 if (sndlen > SCTP_SB_LIMIT_SND(so))
13515 error = EWOULDBLOCK;
13518 stcb->asoc.sb_send_resv += sndlen;
13519 SCTP_TCB_UNLOCK(stcb);
13522 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13524 local_soresv = sndlen;
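	/*
	 * A hedged userland sketch of what the EWOULDBLOCK branch above means
	 * for a non-blocking sender: the application is expected to wait for
	 * send-buffer space and retry (plain POSIX, nothing SCTP-specific):
	 *
	 *   #include <poll.h>
	 *   #include <errno.h>
	 *
	 *   for (;;) {
	 *       ssize_t n = send(sd, buf, len, MSG_DONTWAIT);
	 *       if (n >= 0 || (errno != EWOULDBLOCK && errno != EAGAIN))
	 *           break;                          // sent, or a real error
	 *       struct pollfd pfd = { .fd = sd, .events = POLLOUT };
	 *       (void)poll(&pfd, 1, -1);            // wait until writable
	 *   }
	 */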
13525 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13526 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13527 error = ECONNRESET;
13530 if (create_lock_applied) {
13531 SCTP_ASOC_CREATE_UNLOCK(inp);
13532 create_lock_applied = 0;
13534 if (asoc->stream_reset_outstanding) {
13536 * Can't queue any data while stream reset is underway.
13538 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
13542 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13543 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13546 /* we are now done with all control */
13548 sctp_m_freem(control);
13551 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
13552 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13553 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13554 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13555 if (srcv->sinfo_flags & SCTP_ABORT) {
13558 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13559 error = ECONNRESET;
13563 /* Ok, we will attempt a msgsnd :> */
13564 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
13566 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000
13567 p->td_ru.ru_msgsnd++;
13568 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
13569 p->td_proc->p_stats->p_ru.ru_msgsnd++;
13571 p->p_stats->p_ru.ru_msgsnd++;
13575 /* Are we aborting? */
13576 if (srcv->sinfo_flags & SCTP_ABORT) {
13578 int tot_demand, tot_out = 0, max_out;
13580 SCTP_STAT_INCR(sctps_sends_with_abort);
13581 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13582 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13583 /* It has to be up before we abort */
13584 /* how big is the user initiated abort? */
13585 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13589 if (hold_tcblock) {
13590 SCTP_TCB_UNLOCK(stcb);
13594 struct mbuf *cntm = NULL;
13596 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13598 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13599 tot_out += SCTP_BUF_LEN(cntm);
13603			/* Must fit in an MTU */
13605 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13606 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13608 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13612 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
13615 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13619 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13620 max_out -= sizeof(struct sctp_abort_msg);
13621 if (tot_out > max_out) {
13625 struct sctp_paramhdr *ph;
13627 /* now move forward the data pointer */
13628 ph = mtod(mm, struct sctp_paramhdr *);
13629 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13630 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
13632 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
13634 #if defined(__APPLE__)
13635 SCTP_SOCKET_UNLOCK(so, 0);
13637 error = uiomove((caddr_t)ph, (int)tot_out, uio);
13638 #if defined(__APPLE__)
13639 SCTP_SOCKET_LOCK(so, 0);
13643				 * Here, if we can't copy the user's data we
13644				 * still abort; we just don't get to
13645				 * send the user's note :-0
13652 SCTP_BUF_NEXT(mm) = top;
13656 if (hold_tcblock == 0) {
13657 SCTP_TCB_LOCK(stcb);
13659 atomic_add_int(&stcb->asoc.refcnt, -1);
13660 free_cnt_applied = 0;
13661 /* release this lock, otherwise we hang on ourselves */
13662 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13663 /* now relock the stcb so everything is sane */
13666			/* In this case top is already chained to mm, so
13667			 * avoid a double free, since we free it below if
13668			 * top != NULL and the driver would free it after sending
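		/*
		 * The branch above builds a user-initiated ABORT whose cause
		 * carries the caller's data.  A minimal sketch of triggering it
		 * from userland, assuming the sctp_sendmsg() helper (the message
		 * body becomes the abort cause, truncated to one MTU as enforced
		 * above):
		 *
		 *   const char reason[] = "operator shutdown";
		 *   sctp_sendmsg(sd, reason, sizeof(reason), NULL, 0,
		 *                0, SCTP_ABORT, 0, 0, 0);
		 */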
13676 /* Calculate the maximum we can send */
13677 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13678 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13679 if (non_blocking) {
13680 /* we already checked for non-blocking above. */
13683 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13688 if (hold_tcblock) {
13689 SCTP_TCB_UNLOCK(stcb);
13692 /* Is the stream no. valid? */
13693 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13694 /* Invalid stream number */
13695 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13699 if (asoc->strmout == NULL) {
13700 /* huh? software error */
13701 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13706 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13707 if ((user_marks_eor == 0) &&
13708 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13709 /* It will NEVER fit */
13710 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13714 if ((uio == NULL) && user_marks_eor) {
13716 * We do not support eeor mode for
13717 * sending with mbuf chains (like sendfile).
13719 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13724 if (user_marks_eor) {
13725 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13728 * For non-eeor the whole message must fit in
13729 * the socket send buffer.
13731 local_add_more = sndlen;
13734 if (non_blocking) {
13735 goto skip_preblock;
13737 if (((max_len <= local_add_more) &&
13738 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13740 ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13741 /* No room right now ! */
13742 SOCKBUF_LOCK(&so->so_snd);
13743 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13744 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13745 ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13746 SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13747 (unsigned int)SCTP_SB_LIMIT_SND(so),
13750 stcb->asoc.stream_queue_cnt,
13751 stcb->asoc.chunks_on_out_queue,
13752 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13753 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13754 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13757 #if !defined(__Panda__) && !defined(__Windows__)
13758 stcb->block_entry = &be;
13760 error = sbwait(&so->so_snd);
13761 stcb->block_entry = NULL;
13762 if (error || so->so_error || be.error) {
13765 error = so->so_error;
13770 SOCKBUF_UNLOCK(&so->so_snd);
13773 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13774 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13775 asoc, stcb->asoc.total_output_queue_size);
13777 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13780 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13782 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13783 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13787 SOCKBUF_UNLOCK(&so->so_snd);
13791 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13794 #if defined(__APPLE__)
13795 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
13797	/* sndlen covers the mbuf case;
13798	 * uio_resid covers the non-mbuf case.
13799	 * NOTE: uio will be NULL when top/mbuf is passed
13802 if (srcv->sinfo_flags & SCTP_EOF) {
13803 got_all_of_the_send = 1;
13806 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13812 struct sctp_stream_queue_pending *sp;
13813 struct sctp_stream_out *strm;
13816 SCTP_TCB_SEND_LOCK(stcb);
13817 if ((asoc->stream_locked) &&
13818 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13819 SCTP_TCB_SEND_UNLOCK(stcb);
13820 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13824 SCTP_TCB_SEND_UNLOCK(stcb);
13826 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13827 if (strm->last_msg_incomplete == 0) {
13829 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13830 if ((sp == NULL) || (error)) {
13833 SCTP_TCB_SEND_LOCK(stcb);
13834 if (sp->msg_is_complete) {
13835 strm->last_msg_incomplete = 0;
13836 asoc->stream_locked = 0;
13838				/* We just got locked to this stream in
13839				 * case of an interrupt.
13841 strm->last_msg_incomplete = 1;
13842 asoc->stream_locked = 1;
13843 asoc->stream_locked_on = srcv->sinfo_stream;
13844 sp->sender_all_done = 0;
13846 sctp_snd_sb_alloc(stcb, sp->length);
13847 atomic_add_int(&asoc->stream_queue_cnt, 1);
13848 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13849 SCTP_STAT_INCR(sctps_sends_with_unord);
13851 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13852 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13853 SCTP_TCB_SEND_UNLOCK(stcb);
13855 SCTP_TCB_SEND_LOCK(stcb);
13856 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13857 SCTP_TCB_SEND_UNLOCK(stcb);
13859 /* ???? Huh ??? last msg is gone */
13861 panic("Warning: Last msg marked incomplete, yet nothing left?");
13863 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13864 strm->last_msg_incomplete = 0;
13870 #if defined(__APPLE__)
13871 #if defined(APPLE_LEOPARD)
13872 while (uio->uio_resid > 0) {
13874 while (uio_resid(uio) > 0) {
13877 while (uio->uio_resid > 0) {
13879 /* How much room do we have? */
13880 struct mbuf *new_tail, *mm;
13882 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13883 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13887 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13888 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13889 #if defined(__APPLE__)
13890 #if defined(APPLE_LEOPARD)
13891 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13893 (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) {
13896 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13900 if (hold_tcblock) {
13901 SCTP_TCB_UNLOCK(stcb);
13904 #if defined(__APPLE__)
13905 SCTP_SOCKET_UNLOCK(so, 0);
13907 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
13908 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13910 mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail);
13912 #if defined(__APPLE__)
13913 SCTP_SOCKET_LOCK(so, 0);
13915 if ((mm == NULL) || error) {
13921 /* Update the mbuf and count */
13922 SCTP_TCB_SEND_LOCK(stcb);
13923 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13924 /* we need to get out.
13925 * Peer probably aborted.
13928 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13929 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13930 error = ECONNRESET;
13932 SCTP_TCB_SEND_UNLOCK(stcb);
13935 if (sp->tail_mbuf) {
13936 /* tack it to the end */
13937 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13938 sp->tail_mbuf = new_tail;
13940 /* A stolen mbuf */
13942 sp->tail_mbuf = new_tail;
13944 sctp_snd_sb_alloc(stcb, sndout);
13945 atomic_add_int(&sp->length,sndout);
13948 /* Did we reach EOR? */
13949 #if defined(__APPLE__)
13950 #if defined(APPLE_LEOPARD)
13951 if ((uio->uio_resid == 0) &&
13953 if ((uio_resid(uio) == 0) &&
13956 if ((uio->uio_resid == 0) &&
13958 ((user_marks_eor == 0) ||
13959 (srcv->sinfo_flags & SCTP_EOF) ||
13960 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13961 sp->msg_is_complete = 1;
13963 sp->msg_is_complete = 0;
13965 SCTP_TCB_SEND_UNLOCK(stcb);
13967 #if defined(__APPLE__)
13968 #if defined(APPLE_LEOPARD)
13969 if (uio->uio_resid == 0) {
13971 if (uio_resid(uio) == 0) {
13974 if (uio->uio_resid == 0) {
13980 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
13981			/* This is ugly but we must ensure locking order */
13982 if (hold_tcblock == 0) {
13983 SCTP_TCB_LOCK(stcb);
13986 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13987 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13988 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13989 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13995 SCTP_TCB_UNLOCK(stcb);
13998 /* wait for space now */
13999 if (non_blocking) {
14000 /* Non-blocking io in place out */
14003 /* What about the INIT, send it maybe */
14004 if (queue_only_for_init) {
14005 if (hold_tcblock == 0) {
14006 SCTP_TCB_LOCK(stcb);
14009 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
14010 /* a collision took us forward? */
14013 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14014 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
14018 if ((net->flight_size > net->cwnd) &&
14019 (asoc->sctp_cmt_on_off == 0)) {
14020 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14022 } else if (asoc->ifp_had_enobuf) {
14023 SCTP_STAT_INCR(sctps_ifnomemqueued);
14024 if (net->flight_size > (2 * net->mtu)) {
14027 asoc->ifp_had_enobuf = 0;
14029 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
14030 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
14031 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14032 (stcb->asoc.total_flight > 0) &&
14033 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14034 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14037 * Ok, Nagle is set on and we have data outstanding.
14038 * Don't send anything and let SACKs drive out the
14039				 * data unless we have a "full" segment to send.
14041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14042 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14044 SCTP_STAT_INCR(sctps_naglequeued);
14047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14048 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14049 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14051 SCTP_STAT_INCR(sctps_naglesent);
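			/*
			 * Nagle can be turned off per socket when small messages
			 * must go out immediately.  A hedged userland sketch
			 * (SCTP_NODELAY is the standard RFC 6458 option name):
			 *
			 *   int on = 1;
			 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY,
			 *                  &on, sizeof(on)) < 0)
			 *       perror("SCTP_NODELAY");
			 */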
14054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14056 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14057 nagle_applies, un_sent);
14058 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14059 stcb->asoc.total_flight,
14060 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14062 if (queue_only_for_init)
14063 queue_only_for_init = 0;
14064 if ((queue_only == 0) && (nagle_applies == 0)) {
14066				 * We need to start chunk output
14067				 * before blocking. Note that if
14068				 * a lock is already applied, then
14069				 * input via the net is happening
14070				 * and we don't need to start output :-D
14072 if (hold_tcblock == 0) {
14073 if (SCTP_TCB_TRYLOCK(stcb)) {
14075 sctp_chunk_output(inp,
14077 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14080 sctp_chunk_output(inp,
14082 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14084 if (hold_tcblock == 1) {
14085 SCTP_TCB_UNLOCK(stcb);
14089 SOCKBUF_LOCK(&so->so_snd);
14091 * This is a bit strange, but I think it will
14092 * work. The total_output_queue_size is locked and
14093 * protected by the TCB_LOCK, which we just released.
14094 * There is a race that can occur between releasing it
14095 * above, and me getting the socket lock, where sacks
14096 * come in but we have not put the SB_WAIT on the
14097 * so_snd buffer to get the wakeup. After the LOCK
14098 * is applied the sack_processing will also need to
14099 * LOCK the so->so_snd to do the actual sowwakeup(). So
14100 * once we have the socket buffer lock if we recheck the
14101 * size we KNOW we will get to sleep safely with the
14102 * wakeup flag in place.
14104 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
14105 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14107 #if defined(__APPLE__)
14108 #if defined(APPLE_LEOPARD)
14109 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14110 asoc, uio->uio_resid);
14112 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14113 asoc, uio_resid(uio));
14116 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14117 asoc, uio->uio_resid);
14121 #if !defined(__Panda__) && !defined(__Windows__)
14122 stcb->block_entry = &be;
14124 #if defined(__APPLE__)
14125 sbunlock(&so->so_snd, 1);
14127 error = sbwait(&so->so_snd);
14128 stcb->block_entry = NULL;
14130 if (error || so->so_error || be.error) {
14133 error = so->so_error;
14138 SOCKBUF_UNLOCK(&so->so_snd);
14142 #if defined(__APPLE__)
14143 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14146 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14147 asoc, stcb->asoc.total_output_queue_size);
14150 SOCKBUF_UNLOCK(&so->so_snd);
14151 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14155 SCTP_TCB_SEND_LOCK(stcb);
14157 if (sp->msg_is_complete == 0) {
14158 strm->last_msg_incomplete = 1;
14159 asoc->stream_locked = 1;
14160 asoc->stream_locked_on = srcv->sinfo_stream;
14162 sp->sender_all_done = 1;
14163 strm->last_msg_incomplete = 0;
14164 asoc->stream_locked = 0;
14167 SCTP_PRINTF("Huh no sp TSNH?\n");
14168 strm->last_msg_incomplete = 0;
14169 asoc->stream_locked = 0;
14171 SCTP_TCB_SEND_UNLOCK(stcb);
14172 #if defined(__APPLE__)
14173 #if defined(APPLE_LEOPARD)
14174 if (uio->uio_resid == 0) {
14176 if (uio_resid(uio) == 0) {
14179 if (uio->uio_resid == 0) {
14181 got_all_of_the_send = 1;
14184 /* We send in a 0, since we do NOT have any locks */
14185 error = sctp_msg_append(stcb, net, top, srcv, 0);
14187 if (srcv->sinfo_flags & SCTP_EOF) {
14189 * This should only happen for Panda for the mbuf
14190 * send case, which does NOT yet support EEOR mode.
14191 * Thus, we can just set this flag to do the proper
14194 got_all_of_the_send = 1;
14202 if ((srcv->sinfo_flags & SCTP_EOF) &&
14203 (got_all_of_the_send == 1)) {
14205 SCTP_STAT_INCR(sctps_sends_with_eof);
14207 if (hold_tcblock == 0) {
14208 SCTP_TCB_LOCK(stcb);
14211 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
14212 if (TAILQ_EMPTY(&asoc->send_queue) &&
14213 TAILQ_EMPTY(&asoc->sent_queue) &&
14215 if (asoc->locked_on_sending) {
14218 /* there is nothing queued to send, so I'm done... */
14219 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
14220 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14221 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14222 struct sctp_nets *netp;
14224 /* only send SHUTDOWN the first time through */
14225 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
14226 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14228 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
14229 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
14230 sctp_stop_timers_for_shutdown(stcb);
14231 if (stcb->asoc.alternate) {
14232 netp = stcb->asoc.alternate;
14234 netp = stcb->asoc.primary_destination;
14236 sctp_send_shutdown(stcb, netp);
14237 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14239 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14240 asoc->primary_destination);
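				/*
				 * The SHUTDOWN handling above is what a graceful close
				 * maps to.  A hedged userland sketch: on a one-to-many
				 * socket the association is closed by sending a
				 * zero-length message with the SCTP_EOF flag (the
				 * sctp_sendmsg() helper is assumed; peer/peerlen identify
				 * the association); on a one-to-one socket a plain
				 * close() or shutdown(sd, SHUT_WR) has the same effect.
				 *
				 *   sctp_sendmsg(sd, NULL, 0,
				 *                (struct sockaddr *)&peer, peerlen,
				 *                0, SCTP_EOF, 0, 0, 0);
				 */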
14244 * we still got (or just got) data to send, so set
14248 * XXX sockets draft says that SCTP_EOF should be
14249 * sent with no data. currently, we will allow user
14250 * data to be sent first and move to
14253 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
14254 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14255 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14256 if (hold_tcblock == 0) {
14257 SCTP_TCB_LOCK(stcb);
14260 if (asoc->locked_on_sending) {
14261 /* Locked to send out the data */
14262 struct sctp_stream_queue_pending *sp;
14263 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
14265 if ((sp->length == 0) && (sp->msg_is_complete == 0))
14266 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
14269 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
14270 if (TAILQ_EMPTY(&asoc->send_queue) &&
14271 TAILQ_EMPTY(&asoc->sent_queue) &&
14272 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14274 if (free_cnt_applied) {
14275 atomic_add_int(&stcb->asoc.refcnt, -1);
14276 free_cnt_applied = 0;
14278 sctp_abort_an_association(stcb->sctp_ep, stcb,
14279 NULL, SCTP_SO_LOCKED);
14280 /* now relock the stcb so everything is sane */
14285 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14286 asoc->primary_destination);
14287 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14292 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14293 some_on_control = 1;
14295 if (queue_only_for_init) {
14296 if (hold_tcblock == 0) {
14297 SCTP_TCB_LOCK(stcb);
14300 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
14301 /* a collision took us forward? */
14304 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14305 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
14309 if ((net->flight_size > net->cwnd) &&
14310 (stcb->asoc.sctp_cmt_on_off == 0)) {
14311 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14313 } else if (asoc->ifp_had_enobuf) {
14314 SCTP_STAT_INCR(sctps_ifnomemqueued);
14315 if (net->flight_size > (2 * net->mtu)) {
14318 asoc->ifp_had_enobuf = 0;
14320 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
14321 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
14322 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14323 (stcb->asoc.total_flight > 0) &&
14324 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14325 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14327 * Ok, Nagle is set on and we have data outstanding.
14328 * Don't send anything and let SACKs drive out the
14329		 * data unless we have a "full" segment to send.
14331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14332 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14334 SCTP_STAT_INCR(sctps_naglequeued);
14337 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14338 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14339 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14341 SCTP_STAT_INCR(sctps_naglesent);
14344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14345 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14346 nagle_applies, un_sent);
14347 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14348 stcb->asoc.total_flight,
14349 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14351 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14352 /* we can attempt to send too. */
14353 if (hold_tcblock == 0) {
14354 /* If there is activity recv'ing sacks no need to send */
14355 if (SCTP_TCB_TRYLOCK(stcb)) {
14356 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14360 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14362 } else if ((queue_only == 0) &&
14363 (stcb->asoc.peers_rwnd == 0) &&
14364 (stcb->asoc.total_flight == 0)) {
14365 /* We get to have a probe outstanding */
14366 if (hold_tcblock == 0) {
14368 SCTP_TCB_LOCK(stcb);
14370 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14371 } else if (some_on_control) {
14372 int num_out, reason, frag_point;
14374 /* Here we do control only */
14375 if (hold_tcblock == 0) {
14377 SCTP_TCB_LOCK(stcb);
14379 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14380 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14381 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14383 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14384 queue_only, stcb->asoc.peers_rwnd, un_sent,
14385 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14386 stcb->asoc.total_output_queue_size, error);
14389 #if defined(__APPLE__)
14390 sbunlock(&so->so_snd, 1);
14394 if (local_soresv && stcb) {
14395 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14397 if (create_lock_applied) {
14398 SCTP_ASOC_CREATE_UNLOCK(inp);
14400 if ((stcb) && hold_tcblock) {
14401 SCTP_TCB_UNLOCK(stcb);
14403 if (stcb && free_cnt_applied) {
14404 atomic_add_int(&stcb->asoc.refcnt, -1);
14407 #if !defined(__APPLE__)
14409 if (mtx_owned(&stcb->tcb_mtx)) {
14410 panic("Leaving with tcb mtx owned?");
14412 if (mtx_owned(&stcb->tcb_send_mtx)) {
14413 panic("Leaving with tcb send mtx owned?");
14420 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
14421 * to particle when pak is passed in, so that caller
14422 * can try again with this pak
14424 * NOTE: For other cases, including success case,
14425 * we simply want to return the header back to free
14429 if ((error == EAGAIN) || (error == ENOMEM)) {
14430 SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
14433 (void)SCTP_RELEASE_HEADER(i_pak);
14436 /* This is to handle cases when top has
14437 * been reset to NULL but pak might not
14441 (void)SCTP_RELEASE_HEADER(i_pak);
14447 sctp_validate_no_locks(inp);
14449 SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
14456 sctp_m_freem(control);
14463 * generate an AUTHentication chunk, if required
14466 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
14467 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
14468 struct sctp_tcb *stcb, uint8_t chunk)
14470 struct mbuf *m_auth;
14471 struct sctp_auth_chunk *auth;
14475 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14479 /* sysctl disabled auth? */
14480 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
14483 /* peer doesn't do auth... */
14484 if (!stcb->asoc.peer_supports_auth) {
14487 /* does the requested chunk require auth? */
14488 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14491 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14492 if (m_auth == NULL) {
14496 /* reserve some space if this will be the first mbuf */
14498 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14499 /* fill in the AUTH chunk details */
14500 auth = mtod(m_auth, struct sctp_auth_chunk *);
14501 bzero(auth, sizeof(*auth));
14502 auth->ch.chunk_type = SCTP_AUTHENTICATION;
14503 auth->ch.chunk_flags = 0;
14504 chunk_len = sizeof(*auth) +
14505 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14506 auth->ch.chunk_length = htons(chunk_len);
14507 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14508 /* key id and hmac digest will be computed and filled in upon send */
14510 /* save the offset where the auth was inserted into the chain */
14512 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14513 *offset += SCTP_BUF_LEN(cn);
14516 /* update length and return pointer to the auth chunk */
14517 SCTP_BUF_LEN(m_auth) = chunk_len;
14518 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14519 if (auth_ret != NULL)
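/*
 * Whether a chunk "requires auth" above is driven by the chunk list the
 * peer advertised.  A hedged sketch of how an endpoint asks that a given
 * chunk type only be accepted when authenticated (SCTP_AUTH_CHUNK and
 * struct sctp_authchunk per RFC 6458; the peer then bundles such chunks
 * with the AUTH chunk this routine builds on its sending side):
 *
 *   struct sctp_authchunk ac;
 *   ac.sauth_chunk = 0;   // chunk type 0 = DATA
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK,
 *                  &ac, sizeof(ac)) < 0)
 *       perror("SCTP_AUTH_CHUNK");
 */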
14525 #if defined(__FreeBSD__) || defined(__APPLE__)
14528 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14530 struct nd_prefix *pfx = NULL;
14531 struct nd_pfxrouter *pfxrtr = NULL;
14532 struct sockaddr_in6 gw6;
14534 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
14537 /* get prefix entry of address */
14538 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
14539 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
14541 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
14542 &src6->sin6_addr, &pfx->ndpr_mask))
14545 /* no prefix entry in the prefix list */
14547 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
14548 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14552 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
14553 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14555 /* search installed gateway from prefix entry */
14556 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
14557 memset(&gw6, 0, sizeof(struct sockaddr_in6));
14558 gw6.sin6_family = AF_INET6;
14559 #ifdef HAVE_SIN6_LEN
14560 gw6.sin6_len = sizeof(struct sockaddr_in6);
14562 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
14563 sizeof(struct in6_addr));
14564 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
14565 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
14566 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
14567 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14568 if (sctp_cmpaddr((struct sockaddr *)&gw6,
14569 ro->ro_rt->rt_gateway)) {
14570 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
14574 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
14580 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14583 struct sockaddr_in *sin, *mask;
14584 struct ifaddr *ifa;
14585 struct in_addr srcnetaddr, gwnetaddr;
14587 if (ro == NULL || ro->ro_rt == NULL ||
14588 sifa->address.sa.sa_family != AF_INET) {
14591 ifa = (struct ifaddr *)sifa->ifa;
14592 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14593 sin = (struct sockaddr_in *)&sifa->address.sin;
14594 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14595 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14596 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14597 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14599 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14600 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14601 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14602 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14603 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14604 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
14610 #elif defined(__Userspace__)
14611 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
14613 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14618 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)