/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 258235 2013-11-16 19:57:56Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#if defined(__Userspace_os_Linux)
#define __FAVOR_BSD	/* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
#if !defined(__Userspace_os_Windows)
#include <netinet/udp.h>
#if defined(__APPLE__)
#include <netinet/in.h>
#if defined(__FreeBSD__)
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>
#if defined(__Userspace__) && defined(INET6)
#include <netinet6/sctp6_var.h>
#if defined(__APPLE__)
#define APPLE_FILE_NO 3
#if defined(__APPLE__)
#if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
#define SCTP_MAX_LINKHDR 16
#define SCTP_MAX_GAPS_INARRAY 4
struct sack_track {
	uint8_t right_edge;	/* mergeable on the right edge */
	uint8_t left_edge;	/* mergeable on the left edge */
	uint8_t num_entries;
	uint8_t spare;
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
struct sack_track sack_array[256] = {
	{0, 0, 0, 0,	/* 0x00 */
	{1, 0, 1, 0,	/* 0x01 */
	{0, 0, 1, 0,	/* 0x02 */
	{1, 0, 1, 0,	/* 0x03 */
	{0, 0, 1, 0,	/* 0x04 */
	{1, 0, 2, 0,	/* 0x05 */
	{0, 0, 1, 0,	/* 0x06 */
	{1, 0, 1, 0,	/* 0x07 */
	{0, 0, 1, 0,	/* 0x08 */
	{1, 0, 2, 0,	/* 0x09 */
	{0, 0, 2, 0,	/* 0x0a */
	{1, 0, 2, 0,	/* 0x0b */
	{0, 0, 1, 0,	/* 0x0c */
	{1, 0, 2, 0,	/* 0x0d */
	{0, 0, 1, 0,	/* 0x0e */
	{1, 0, 1, 0,	/* 0x0f */
	{0, 0, 1, 0,	/* 0x10 */
	{1, 0, 2, 0,	/* 0x11 */
	{0, 0, 2, 0,	/* 0x12 */
	{1, 0, 2, 0,	/* 0x13 */
	{0, 0, 2, 0,	/* 0x14 */
	{1, 0, 3, 0,	/* 0x15 */
	{0, 0, 2, 0,	/* 0x16 */
	{1, 0, 2, 0,	/* 0x17 */
	{0, 0, 1, 0,	/* 0x18 */
	{1, 0, 2, 0,	/* 0x19 */
	{0, 0, 2, 0,	/* 0x1a */
	{1, 0, 2, 0,	/* 0x1b */
	{0, 0, 1, 0,	/* 0x1c */
	{1, 0, 2, 0,	/* 0x1d */
	{0, 0, 1, 0,	/* 0x1e */
	{1, 0, 1, 0,	/* 0x1f */
	{0, 0, 1, 0,	/* 0x20 */
	{1, 0, 2, 0,	/* 0x21 */
	{0, 0, 2, 0,	/* 0x22 */
	{1, 0, 2, 0,	/* 0x23 */
	{0, 0, 2, 0,	/* 0x24 */
	{1, 0, 3, 0,	/* 0x25 */
	{0, 0, 2, 0,	/* 0x26 */
	{1, 0, 2, 0,	/* 0x27 */
	{0, 0, 2, 0,	/* 0x28 */
	{1, 0, 3, 0,	/* 0x29 */
	{0, 0, 3, 0,	/* 0x2a */
	{1, 0, 3, 0,	/* 0x2b */
	{0, 0, 2, 0,	/* 0x2c */
	{1, 0, 3, 0,	/* 0x2d */
	{0, 0, 2, 0,	/* 0x2e */
	{1, 0, 2, 0,	/* 0x2f */
	{0, 0, 1, 0,	/* 0x30 */
	{1, 0, 2, 0,	/* 0x31 */
	{0, 0, 2, 0,	/* 0x32 */
	{1, 0, 2, 0,	/* 0x33 */
	{0, 0, 2, 0,	/* 0x34 */
	{1, 0, 3, 0,	/* 0x35 */
	{0, 0, 2, 0,	/* 0x36 */
	{1, 0, 2, 0,	/* 0x37 */
	{0, 0, 1, 0,	/* 0x38 */
	{1, 0, 2, 0,	/* 0x39 */
	{0, 0, 2, 0,	/* 0x3a */
	{1, 0, 2, 0,	/* 0x3b */
	{0, 0, 1, 0,	/* 0x3c */
	{1, 0, 2, 0,	/* 0x3d */
	{0, 0, 1, 0,	/* 0x3e */
	{1, 0, 1, 0,	/* 0x3f */
	{0, 0, 1, 0,	/* 0x40 */
	{1, 0, 2, 0,	/* 0x41 */
	{0, 0, 2, 0,	/* 0x42 */
	{1, 0, 2, 0,	/* 0x43 */
	{0, 0, 2, 0,	/* 0x44 */
	{1, 0, 3, 0,	/* 0x45 */
	{0, 0, 2, 0,	/* 0x46 */
	{1, 0, 2, 0,	/* 0x47 */
	{0, 0, 2, 0,	/* 0x48 */
	{1, 0, 3, 0,	/* 0x49 */
	{0, 0, 3, 0,	/* 0x4a */
	{1, 0, 3, 0,	/* 0x4b */
	{0, 0, 2, 0,	/* 0x4c */
	{1, 0, 3, 0,	/* 0x4d */
	{0, 0, 2, 0,	/* 0x4e */
	{1, 0, 2, 0,	/* 0x4f */
	{0, 0, 2, 0,	/* 0x50 */
	{1, 0, 3, 0,	/* 0x51 */
	{0, 0, 3, 0,	/* 0x52 */
	{1, 0, 3, 0,	/* 0x53 */
	{0, 0, 3, 0,	/* 0x54 */
	{1, 0, 4, 0,	/* 0x55 */
	{0, 0, 3, 0,	/* 0x56 */
	{1, 0, 3, 0,	/* 0x57 */
	{0, 0, 2, 0,	/* 0x58 */
	{1, 0, 3, 0,	/* 0x59 */
	{0, 0, 3, 0,	/* 0x5a */
	{1, 0, 3, 0,	/* 0x5b */
	{0, 0, 2, 0,	/* 0x5c */
	{1, 0, 3, 0,	/* 0x5d */
	{0, 0, 2, 0,	/* 0x5e */
	{1, 0, 2, 0,	/* 0x5f */
	{0, 0, 1, 0,	/* 0x60 */
	{1, 0, 2, 0,	/* 0x61 */
	{0, 0, 2, 0,	/* 0x62 */
	{1, 0, 2, 0,	/* 0x63 */
	{0, 0, 2, 0,	/* 0x64 */
	{1, 0, 3, 0,	/* 0x65 */
	{0, 0, 2, 0,	/* 0x66 */
	{1, 0, 2, 0,	/* 0x67 */
	{0, 0, 2, 0,	/* 0x68 */
	{1, 0, 3, 0,	/* 0x69 */
	{0, 0, 3, 0,	/* 0x6a */
	{1, 0, 3, 0,	/* 0x6b */
	{0, 0, 2, 0,	/* 0x6c */
	{1, 0, 3, 0,	/* 0x6d */
	{0, 0, 2, 0,	/* 0x6e */
	{1, 0, 2, 0,	/* 0x6f */
	{0, 0, 1, 0,	/* 0x70 */
	{1, 0, 2, 0,	/* 0x71 */
	{0, 0, 2, 0,	/* 0x72 */
	{1, 0, 2, 0,	/* 0x73 */
	{0, 0, 2, 0,	/* 0x74 */
	{1, 0, 3, 0,	/* 0x75 */
	{0, 0, 2, 0,	/* 0x76 */
	{1, 0, 2, 0,	/* 0x77 */
	{0, 0, 1, 0,	/* 0x78 */
	{1, 0, 2, 0,	/* 0x79 */
	{0, 0, 2, 0,	/* 0x7a */
	{1, 0, 2, 0,	/* 0x7b */
	{0, 0, 1, 0,	/* 0x7c */
	{1, 0, 2, 0,	/* 0x7d */
	{0, 0, 1, 0,	/* 0x7e */
	{1, 0, 1, 0,	/* 0x7f */
	{0, 1, 1, 0,	/* 0x80 */
	{1, 1, 2, 0,	/* 0x81 */
	{0, 1, 2, 0,	/* 0x82 */
	{1, 1, 2, 0,	/* 0x83 */
	{0, 1, 2, 0,	/* 0x84 */
	{1, 1, 3, 0,	/* 0x85 */
	{0, 1, 2, 0,	/* 0x86 */
	{1, 1, 2, 0,	/* 0x87 */
	{0, 1, 2, 0,	/* 0x88 */
	{1, 1, 3, 0,	/* 0x89 */
	{0, 1, 3, 0,	/* 0x8a */
	{1, 1, 3, 0,	/* 0x8b */
	{0, 1, 2, 0,	/* 0x8c */
	{1, 1, 3, 0,	/* 0x8d */
	{0, 1, 2, 0,	/* 0x8e */
	{1, 1, 2, 0,	/* 0x8f */
	{0, 1, 2, 0,	/* 0x90 */
	{1, 1, 3, 0,	/* 0x91 */
	{0, 1, 3, 0,	/* 0x92 */
	{1, 1, 3, 0,	/* 0x93 */
	{0, 1, 3, 0,	/* 0x94 */
	{1, 1, 4, 0,	/* 0x95 */
	{0, 1, 3, 0,	/* 0x96 */
	{1, 1, 3, 0,	/* 0x97 */
	{0, 1, 2, 0,	/* 0x98 */
	{1, 1, 3, 0,	/* 0x99 */
	{0, 1, 3, 0,	/* 0x9a */
	{1, 1, 3, 0,	/* 0x9b */
	{0, 1, 2, 0,	/* 0x9c */
	{1, 1, 3, 0,	/* 0x9d */
	{0, 1, 2, 0,	/* 0x9e */
	{1, 1, 2, 0,	/* 0x9f */
	{0, 1, 2, 0,	/* 0xa0 */
	{1, 1, 3, 0,	/* 0xa1 */
	{0, 1, 3, 0,	/* 0xa2 */
	{1, 1, 3, 0,	/* 0xa3 */
	{0, 1, 3, 0,	/* 0xa4 */
	{1, 1, 4, 0,	/* 0xa5 */
	{0, 1, 3, 0,	/* 0xa6 */
	{1, 1, 3, 0,	/* 0xa7 */
	{0, 1, 3, 0,	/* 0xa8 */
	{1, 1, 4, 0,	/* 0xa9 */
	{0, 1, 4, 0,	/* 0xaa */
	{1, 1, 4, 0,	/* 0xab */
	{0, 1, 3, 0,	/* 0xac */
	{1, 1, 4, 0,	/* 0xad */
	{0, 1, 3, 0,	/* 0xae */
	{1, 1, 3, 0,	/* 0xaf */
	{0, 1, 2, 0,	/* 0xb0 */
	{1, 1, 3, 0,	/* 0xb1 */
	{0, 1, 3, 0,	/* 0xb2 */
	{1, 1, 3, 0,	/* 0xb3 */
	{0, 1, 3, 0,	/* 0xb4 */
	{1, 1, 4, 0,	/* 0xb5 */
	{0, 1, 3, 0,	/* 0xb6 */
	{1, 1, 3, 0,	/* 0xb7 */
	{0, 1, 2, 0,	/* 0xb8 */
	{1, 1, 3, 0,	/* 0xb9 */
	{0, 1, 3, 0,	/* 0xba */
	{1, 1, 3, 0,	/* 0xbb */
	{0, 1, 2, 0,	/* 0xbc */
	{1, 1, 3, 0,	/* 0xbd */
	{0, 1, 2, 0,	/* 0xbe */
	{1, 1, 2, 0,	/* 0xbf */
	{0, 1, 1, 0,	/* 0xc0 */
	{1, 1, 2, 0,	/* 0xc1 */
	{0, 1, 2, 0,	/* 0xc2 */
	{1, 1, 2, 0,	/* 0xc3 */
	{0, 1, 2, 0,	/* 0xc4 */
	{1, 1, 3, 0,	/* 0xc5 */
	{0, 1, 2, 0,	/* 0xc6 */
	{1, 1, 2, 0,	/* 0xc7 */
	{0, 1, 2, 0,	/* 0xc8 */
	{1, 1, 3, 0,	/* 0xc9 */
	{0, 1, 3, 0,	/* 0xca */
	{1, 1, 3, 0,	/* 0xcb */
	{0, 1, 2, 0,	/* 0xcc */
	{1, 1, 3, 0,	/* 0xcd */
	{0, 1, 2, 0,	/* 0xce */
	{1, 1, 2, 0,	/* 0xcf */
	{0, 1, 2, 0,	/* 0xd0 */
	{1, 1, 3, 0,	/* 0xd1 */
	{0, 1, 3, 0,	/* 0xd2 */
	{1, 1, 3, 0,	/* 0xd3 */
	{0, 1, 3, 0,	/* 0xd4 */
	{1, 1, 4, 0,	/* 0xd5 */
	{0, 1, 3, 0,	/* 0xd6 */
	{1, 1, 3, 0,	/* 0xd7 */
	{0, 1, 2, 0,	/* 0xd8 */
	{1, 1, 3, 0,	/* 0xd9 */
	{0, 1, 3, 0,	/* 0xda */
	{1, 1, 3, 0,	/* 0xdb */
	{0, 1, 2, 0,	/* 0xdc */
	{1, 1, 3, 0,	/* 0xdd */
	{0, 1, 2, 0,	/* 0xde */
	{1, 1, 2, 0,	/* 0xdf */
	{0, 1, 1, 0,	/* 0xe0 */
	{1, 1, 2, 0,	/* 0xe1 */
	{0, 1, 2, 0,	/* 0xe2 */
	{1, 1, 2, 0,	/* 0xe3 */
	{0, 1, 2, 0,	/* 0xe4 */
	{1, 1, 3, 0,	/* 0xe5 */
	{0, 1, 2, 0,	/* 0xe6 */
	{1, 1, 2, 0,	/* 0xe7 */
	{0, 1, 2, 0,	/* 0xe8 */
	{1, 1, 3, 0,	/* 0xe9 */
	{0, 1, 3, 0,	/* 0xea */
	{1, 1, 3, 0,	/* 0xeb */
	{0, 1, 2, 0,	/* 0xec */
	{1, 1, 3, 0,	/* 0xed */
	{0, 1, 2, 0,	/* 0xee */
	{1, 1, 2, 0,	/* 0xef */
	{0, 1, 1, 0,	/* 0xf0 */
	{1, 1, 2, 0,	/* 0xf1 */
	{0, 1, 2, 0,	/* 0xf2 */
	{1, 1, 2, 0,	/* 0xf3 */
	{0, 1, 2, 0,	/* 0xf4 */
	{1, 1, 3, 0,	/* 0xf5 */
	{0, 1, 2, 0,	/* 0xf6 */
	{1, 1, 2, 0,	/* 0xf7 */
	{0, 1, 1, 0,	/* 0xf8 */
	{1, 1, 2, 0,	/* 0xf9 */
	{0, 1, 2, 0,	/* 0xfa */
	{1, 1, 2, 0,	/* 0xfb */
	{0, 1, 1, 0,	/* 0xfc */
	{1, 1, 2, 0,	/* 0xfd */
	{0, 1, 1, 0,	/* 0xfe */
	{1, 1, 1, 0,	/* 0xff */
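/*
 * Editor's illustration (a minimal sketch, not upstream code): how the
 * sack_array table is consumed.  A byte taken from the mapping array of
 * received TSNs indexes the table directly; the entry reports whether the
 * byte's edge bits are set (right_edge/left_edge, so a run may merge with
 * the neighbouring byte) and lists the gap descriptors encoded by that bit
 * pattern.  The helper name below is hypothetical.
 */
static int
example_gaps_in_byte(uint8_t map_byte, struct sctp_gap_ack_block *dst)
{
	const struct sack_track *entry = &sack_array[map_byte];
	int i;

	/* right_edge/left_edge flag runs that touch the byte boundary and
	 * can be merged with the gaps of the adjacent bytes. */
	for (i = 0; i < entry->num_entries; i++) {
		dst[i] = entry->gaps[i];
	}
	return (entry->num_entries);
}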
sctp_is_address_in_scope(struct sctp_ifa *ifa,
struct sctp_scoping *scope,
if ((scope->loopback_scope == 0) &&
(ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
/* skip loopback if not in scope */
switch (ifa->address.sa.sa_family) {
if (scope->ipv4_addr_legal) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)&ifa->address.sin;
if (sin->sin_addr.s_addr == 0) {
/* not in scope, unspecified */
if ((scope->ipv4_local_scope == 0) &&
(IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
/* private address not in scope */
if (scope->ipv6_addr_legal) {
struct sockaddr_in6 *sin6;
#if !defined(__Panda__)
/* Must update the flags, bummer, which
 * means any IFA locks must now be applied HERE <->
 */
sctp_gather_internal_ifa_flags(ifa);
if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
/* ok to use deprecated addresses? */
sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
/* skip unspecified addresses */
if ( /* (local_scope == 0) && */
(IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
if ((scope->site_scope == 0) &&
(IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
#if defined(__Userspace__)
if (!scope->conn_addr_legal) {
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
#if defined(INET) || defined(INET6)
struct sctp_paramhdr *parmh;
switch (ifa->address.sa.sa_family) {
plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
#if defined(INET) || defined(INET6)
if (M_TRAILINGSPACE(m) >= plen) {
/* easy side we just drop it on the end */
parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
/* Need more space */
while (SCTP_BUF_NEXT(mret) != NULL) {
mret = SCTP_BUF_NEXT(mret);
SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
if (SCTP_BUF_NEXT(mret) == NULL) {
/* We are hosed, can't add more addresses */
mret = SCTP_BUF_NEXT(mret);
parmh = mtod(mret, struct sctp_paramhdr *);
/* now add the parameter */
switch (ifa->address.sa.sa_family) {
struct sctp_ipv4addr_param *ipv4p;
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)&ifa->address.sin;
ipv4p = (struct sctp_ipv4addr_param *)parmh;
parmh->param_type = htons(SCTP_IPV4_ADDRESS);
parmh->param_length = htons(plen);
ipv4p->addr = sin->sin_addr.s_addr;
SCTP_BUF_LEN(mret) += plen;
struct sctp_ipv6addr_param *ipv6p;
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
ipv6p = (struct sctp_ipv6addr_param *)parmh;
parmh->param_type = htons(SCTP_IPV6_ADDRESS);
parmh->param_length = htons(plen);
memcpy(ipv6p->addr, &sin6->sin6_addr,
sizeof(ipv6p->addr));
#if defined(SCTP_EMBEDDED_V6_SCOPE)
/* clear embedded scope in the address */
in6_clearscope((struct in6_addr *)ipv6p->addr);
SCTP_BUF_LEN(mret) += plen;
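/*
 * Editor's note (illustration derived from RFC 4960, not upstream code):
 * the parameter appended above is a plain TLV.  For IPv4 it occupies
 * 8 bytes on the wire:
 *
 *	param_type   = SCTP_IPV4_ADDRESS (5)
 *	param_length = 8
 *	addr         = 4-byte IPv4 address in network byte order
 *
 * and for IPv6, 20 bytes with param_type SCTP_IPV6_ADDRESS (6).
 */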
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_scoping *scope,
struct mbuf *m_at, int cnt_inits_to,
uint16_t *padding_len, uint16_t *chunk_len)
struct sctp_vrf *vrf = NULL;
int cnt, limit_out = 0, total_count;
vrf_id = inp->def_vrf_id;
SCTP_IPI_ADDR_RLOCK();
vrf = sctp_find_vrf(vrf_id);
SCTP_IPI_ADDR_RUNLOCK();
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
struct sctp_ifa *sctp_ifap;
struct sctp_ifn *sctp_ifnp;
if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
cnt = SCTP_ADDRESS_LIMIT;
LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
if ((scope->loopback_scope == 0) &&
SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
/* Skip loopback devices if loopback_scope
 * not set */
LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
#if defined(__Userspace__)
if (sctp_ifap->address.sa.sa_family == AF_CONN) {
if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
if (cnt > SCTP_ADDRESS_LIMIT) {
if (cnt > SCTP_ADDRESS_LIMIT) {
LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
if ((scope->loopback_scope == 0) &&
SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
/* Skip loopback devices if
 * loopback_scope not set */
LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
#if defined(__Userspace__)
if (sctp_ifap->address.sa.sa_family == AF_CONN) {
if (sctp_is_address_in_scope(sctp_ifap,
if ((chunk_len != NULL) &&
(padding_len != NULL) &&
(*padding_len > 0)) {
memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
SCTP_BUF_LEN(m_at) += *padding_len;
*chunk_len += *padding_len;
m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
/* two from each address */
if (total_count > SCTP_ADDRESS_LIMIT) {
/* No more addresses */
struct sctp_laddr *laddr;
/* First, how many? */
LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
if (laddr->ifa == NULL) {
if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
/* Address being deleted by the system, don't
 * list it */
if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/* Address being deleted on this ep
 * don't list it */
#if defined(__Userspace__)
if (laddr->ifa->address.sa.sa_family == AF_CONN) {
if (sctp_is_address_in_scope(laddr->ifa,
/*
 * To get through a NAT we only list addresses if we have
 * more than one. That way if you just bind a single address
 * we let the source of the init dictate our address.
 */
LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
if (laddr->ifa == NULL) {
if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
#if defined(__Userspace__)
if (laddr->ifa->address.sa.sa_family == AF_CONN) {
if (sctp_is_address_in_scope(laddr->ifa,
if ((chunk_len != NULL) &&
(padding_len != NULL) &&
(*padding_len > 0)) {
memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
SCTP_BUF_LEN(m_at) += *padding_len;
*chunk_len += *padding_len;
m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
if (cnt >= SCTP_ADDRESS_LIMIT) {
SCTP_IPI_ADDR_RUNLOCK();
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
uint8_t dest_is_loop,
uint8_t dest_is_priv,
uint8_t dest_is_global = 0;
/* dest_is_priv is true if destination is a private address */
/* dest_is_loop is true if destination is a loopback address */
/*
 * Here we determine if it is a preferred address. A preferred address
 * means it is the same scope or higher scope than the destination.
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *    src    |   dest   |    result
 * -----------------------------------------
 *     L     |    L     |    yes
 * -----------------------------------------
 *     P     |    L     |    yes-v4 no-v6
 * -----------------------------------------
 *     G     |    L     |    yes-v4 no-v6
 * -----------------------------------------
 *     L     |    P     |    no
 * -----------------------------------------
 *     P     |    P     |    yes
 * -----------------------------------------
 *     G     |    P     |    no
 * -----------------------------------------
 *     L     |    G     |    no
 * -----------------------------------------
 *     P     |    G     |    no
 * -----------------------------------------
 *     G     |    G     |    yes
 * -----------------------------------------
 */
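/*
 * Editor's example for the table above: with fam == AF_INET, a source of
 * 10.0.0.1 (P) is preferred for a destination of 192.168.1.1 (P), but not
 * for a global destination such as 203.0.113.1, which falls into the
 * P -> G row and is rejected below as "NO:7".
 */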
if (ifa->address.sa.sa_family != fam) {
/* forget mis-matched family */
if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
/* Ok the address may be ok */
if (fam == AF_INET6) {
/* ok to use deprecated addresses? no, let's not! */
if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
if (ifa->src_is_priv && !ifa->src_is_loop) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
if (ifa->src_is_glob) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
/* Now that we know what is what, implement our table.
 * This could in theory be done slicker (it used to be), but this
 * is straightforward and easier to validate :-)
 */
SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
dest_is_loop, dest_is_priv, dest_is_global);
if ((ifa->src_is_loop) && (dest_is_priv)) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
if ((ifa->src_is_glob) && (dest_is_priv)) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
if ((ifa->src_is_loop) && (dest_is_global)) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
if ((ifa->src_is_priv) && (dest_is_global)) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
/* it's a preferred address */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
uint8_t dest_is_loop,
uint8_t dest_is_priv,
uint8_t dest_is_global = 0;
/*
 * Here we determine if it is an acceptable address. An acceptable
 * address means it is the same scope or higher scope but we can
 * allow for NAT, which means it is ok to have a global dest and a
 * private src.
 *
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *    src    |   dest   |    result
 * -----------------------------------------
 *     L     |    L     |    yes
 * -----------------------------------------
 *     P     |    L     |    yes-v4 no-v6
 * -----------------------------------------
 *     G     |    L     |    yes
 * -----------------------------------------
 *     L     |    P     |    no
 * -----------------------------------------
 *     P     |    P     |    yes
 * -----------------------------------------
 *     G     |    P     |    yes - May not work
 * -----------------------------------------
 *     L     |    G     |    no
 * -----------------------------------------
 *     P     |    G     |    yes - May not work
 * -----------------------------------------
 *     G     |    G     |    yes
 * -----------------------------------------
 */
if (ifa->address.sa.sa_family != fam) {
/* forget non-matching family */
SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
ifa->address.sa.sa_family, fam);
/* Ok the address may be ok */
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
dest_is_loop, dest_is_priv);
if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
if (fam == AF_INET6) {
/* ok to use deprecated addresses? */
if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
if (ifa->src_is_priv) {
/* Special case, linklocal to loop */
/*
 * Now that we know what is what, implement our table.
 * This could in theory be done slicker (it used to be), but this
 * is straightforward and easier to validate :-)
 */
SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
if ((ifa->src_is_loop == 1) && (dest_is_global)) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
/* it's an acceptable address */
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
struct sctp_laddr *laddr;
/* There are no restrictions, no TCB :-) */
LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
if (laddr->ifa == NULL) {
SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
if (laddr->ifa == ifa) {
/* Yes it is on the list */
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
struct sctp_laddr *laddr;
LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
if (laddr->ifa == NULL) {
SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
if ((laddr->ifa == ifa) && laddr->action == 0)
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
int non_asoc_addr_ok,
uint8_t dest_is_priv,
uint8_t dest_is_loop,
struct sctp_laddr *laddr, *starting_point;
struct sctp_ifn *sctp_ifn;
struct sctp_ifa *sctp_ifa, *sifa;
struct sctp_vrf *vrf;
vrf = sctp_find_vrf(vrf_id);
ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
sctp_ifn = sctp_find_ifn(ifn, ifn_index);
/*
 * First question: is the ifn we will emit on in our list? If so, we
 * want such an address. Note that we first looked for a
 * preferred address.
 */
/* is a preferred one on the interface we route out? */
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0))
sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
if (sctp_is_addr_in_ep(inp, sifa)) {
atomic_add_int(&sifa->refcount, 1);
/*
 * ok, now we need to find one on the list of the addresses.
 * We can't get one on the emitting interface, so let's first find
 * a preferred one. If not that, an acceptable one; otherwise...
 */
starting_point = inp->next_addr_touse;
if (inp->next_addr_touse == NULL) {
inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
for (laddr = inp->next_addr_touse; laddr;
laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
if (laddr->ifa == NULL) {
/* address has been removed */
if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/* address is being deleted */
sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
atomic_add_int(&sifa->refcount, 1);
if (resettotop == 0) {
inp->next_addr_touse = NULL;
inp->next_addr_touse = starting_point;
if (inp->next_addr_touse == NULL) {
inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
/* ok, what about an acceptable address in the inp */
for (laddr = inp->next_addr_touse; laddr;
laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
if (laddr->ifa == NULL) {
/* address has been removed */
if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/* address is being deleted */
sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
atomic_add_int(&sifa->refcount, 1);
if (resettotop == 0) {
inp->next_addr_touse = NULL;
goto once_again_too;
/*
 * no address bound can be a source for the destination; we are in
 * trouble.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
uint8_t dest_is_priv,
uint8_t dest_is_loop,
int non_asoc_addr_ok,
struct sctp_laddr *laddr, *starting_point;
struct sctp_ifn *sctp_ifn;
struct sctp_ifa *sctp_ifa, *sifa;
uint8_t start_at_beginning = 0;
struct sctp_vrf *vrf;
/*
 * First question: is the ifn we will emit on in our list? If so, we
 * want such an address.
 */
vrf = sctp_find_vrf(vrf_id);
ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
sctp_ifn = sctp_find_ifn(ifn, ifn_index);
/*
 * First question: is the ifn we will emit on in our list? If so,
 * we want that one. First we look for a preferred. Second, we go
 * for an acceptable.
 */
/* first try for a preferred address on the ep */
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
atomic_add_int(&sifa->refcount, 1);
/* next try for an acceptable address on the ep */
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
atomic_add_int(&sifa->refcount, 1);
/*
 * if we can't find one like that then we must look at all
 * addresses bound to pick one at first preferable then
 * secondly acceptable.
 */
starting_point = stcb->asoc.last_used_address;
if (stcb->asoc.last_used_address == NULL) {
start_at_beginning = 1;
stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
/* search beginning with the last used address */
for (laddr = stcb->asoc.last_used_address; laddr;
laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
if (laddr->ifa == NULL) {
/* address has been removed */
if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/* address is being deleted */
sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
stcb->asoc.last_used_address = laddr;
atomic_add_int(&sifa->refcount, 1);
if (start_at_beginning == 0) {
stcb->asoc.last_used_address = NULL;
goto sctp_from_the_top;
/* now try for any higher scope than the destination */
stcb->asoc.last_used_address = starting_point;
start_at_beginning = 0;
if (stcb->asoc.last_used_address == NULL) {
start_at_beginning = 1;
stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
/* search beginning with the last used address */
for (laddr = stcb->asoc.last_used_address; laddr;
laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
if (laddr->ifa == NULL) {
/* address has been removed */
if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/* address is being deleted */
sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
stcb->asoc.last_used_address = laddr;
atomic_add_int(&sifa->refcount, 1);
if (start_at_beginning == 0) {
stcb->asoc.last_used_address = NULL;
goto sctp_from_the_top2;
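/*
 * Editor's sketch (hypothetical helper, not in the upstream file): the
 * "on the no-no list" test repeated above and in the routines below can
 * be read as a single predicate.
 */
static int
example_addr_is_barred(struct sctp_tcb *stcb, struct sctp_ifa *sifa,
                       int non_asoc_addr_ok)
{
	if (non_asoc_addr_ok == 0) {
		/* restricted addresses may not be used at all */
		return (sctp_is_addr_restricted(stcb, sifa));
	}
	/* otherwise only restricted addresses that are no longer
	 * pending are barred */
	return (sctp_is_addr_restricted(stcb, sifa) &&
	        !sctp_is_addr_pending(stcb, sifa));
}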
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
struct sctp_tcb *stcb,
int non_asoc_addr_ok,
uint8_t dest_is_loop,
uint8_t dest_is_priv,
struct sctp_ifa *ifa, *sifa;
int num_eligible_addr = 0;
#ifdef SCTP_EMBEDDED_V6_SCOPE
struct sockaddr_in6 sin6, lsa6;
if (fam == AF_INET6) {
memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
(void)sa6_recoverscope(&sin6);
(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
#endif /* SCTP_KAME */
#endif /* SCTP_EMBEDDED_V6_SCOPE */
LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0))
sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
if (fam == AF_INET6 &&
sifa->src_is_loop && sifa->src_is_priv) {
/* don't allow fe80::1 to be a src on loop ::1, we don't list it
 * to the peer so we will get an abort.
 */
#ifdef SCTP_EMBEDDED_V6_SCOPE
if (fam == AF_INET6 &&
IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
/* link-local <-> link-local must belong to the same scope. */
memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
(void)sa6_recoverscope(&lsa6);
(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
#endif /* SCTP_KAME */
if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
/* Check if the IPv6 address matches the next-hop.
 * In the mobile case, an old IPv6 address may not have been
 * deleted from the interface, so the interface carries both the
 * previous and the new addresses. We should use the one
 * corresponding to the next-hop. (by micchie)
 */
if (stcb && fam == AF_INET6 &&
sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
/* Avoid topologically incorrect IPv4 address */
if (stcb && fam == AF_INET &&
sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/*
 * It is restricted for some reason..
 * probably not yet added.
 */
if (num_eligible_addr >= addr_wanted) {
num_eligible_addr++;
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
struct sctp_tcb *stcb,
int non_asoc_addr_ok,
uint8_t dest_is_loop,
uint8_t dest_is_priv,
struct sctp_ifa *ifa, *sifa;
int num_eligible_addr = 0;
LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0)) {
sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/*
 * It is restricted for some reason..
 * probably not yet added.
 */
num_eligible_addr++;
return (num_eligible_addr);
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_tcb *stcb,
struct sctp_nets *net,
uint8_t dest_is_priv,
uint8_t dest_is_loop,
int non_asoc_addr_ok,
int cur_addr_num = 0, num_preferred = 0;
struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
struct sctp_ifa *sctp_ifa, *sifa;
struct sctp_vrf *vrf;
/*
 * For boundall we can use any address in the association.
 * If non_asoc_addr_ok is set we can use any address (at least in
 * theory). So we look for preferred addresses first. If we find one,
 * we use it. Otherwise we next try to get an address on the
 * interface, which we should be able to do (unless non_asoc_addr_ok
 * is false and we are routed out that way). In these cases where we
 * can't use the address of the interface we go through all the
 * ifn's looking for an address we can use and fill that in. Punting
 * means we send back address 0, which will probably cause problems
 * actually since then IP will fill in the address of the route ifn,
 * which means we probably already rejected it.. i.e. here comes an
 * abort :-<.
 */
vrf = sctp_find_vrf(vrf_id);
ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
if (sctp_ifn == NULL) {
/* ?? We don't have this guy ?? */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
goto bound_all_plan_b;
SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
ifn_index, sctp_ifn->ifn_name);
cur_addr_num = net->indx_of_eligible_next_to_use;
num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
num_preferred, sctp_ifn->ifn_name);
if (num_preferred == 0) {
/*
 * no eligible addresses, we must use some other interface
 * address if we can find one.
 */
goto bound_all_plan_b;
/*
 * Ok we have num_eligible_addr set with how many we can use, this
 * may vary from call to call due to addresses being deprecated
 * etc..
 */
if (cur_addr_num >= num_preferred) {
/*
 * select the nth address from the list (where cur_addr_num is the
 * nth) and 0 is the first one, 1 is the second one etc...
 */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
dest_is_priv, cur_addr_num, fam, ro);
/* if sctp_ifa is NULL something changed??, fall to plan b. */
atomic_add_int(&sctp_ifa->refcount, 1);
/* save off where the next one we will want */
net->indx_of_eligible_next_to_use = cur_addr_num + 1;
/*
 * plan_b: Look at all interfaces and find a preferred address. If
 * no preferred fall through to plan_c.
 */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
sctp_ifn->ifn_name);
if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
/* wrong base scope */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
if ((sctp_ifn == looked_at) && looked_at) {
/* already looked at this guy */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
dest_is_loop, dest_is_priv, fam);
SCTPDBG(SCTP_DEBUG_OUTPUT2,
"Found ifn:%p %d preferred source addresses\n",
ifn, num_preferred);
if (num_preferred == 0) {
/* None on this interface. */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
SCTPDBG(SCTP_DEBUG_OUTPUT2,
"num preferred:%d on interface:%p cur_addr_num:%d\n",
num_preferred, (void *)sctp_ifn, cur_addr_num);
/*
 * Ok we have num_eligible_addr set with how many we can
 * use, this may vary from call to call due to addresses
 * being deprecated etc..
 */
if (cur_addr_num >= num_preferred) {
sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
dest_is_priv, cur_addr_num, fam, ro);
net->indx_of_eligible_next_to_use = cur_addr_num + 1;
SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
atomic_add_int(&sifa->refcount, 1);
again_with_private_addresses_allowed:
/* plan_c: do we have an acceptable address on the emit interface */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
if (emit_ifn == NULL) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0)) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/*
 * It is restricted for some
 * reason.. probably not yet added.
 */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
SCTP_PRINTF("Stcb is null - no print\n");
atomic_add_int(&sifa->refcount, 1);
/*
 * plan_d: We are in trouble. No preferred address on the emit
 * interface. And not even a preferred address on all interfaces.
 * Go out and see if we can find an acceptable address somewhere
 * amongst all interfaces.
 */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
/* wrong base scope */
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0))
sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, sifa)) &&
(!sctp_is_addr_pending(stcb, sifa)))) {
/*
 * It is restricted for some
 * reason.. probably not yet added.
 */
if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
stcb->asoc.scope.ipv4_local_scope = 1;
goto again_with_private_addresses_allowed;
} else if (retried == 1) {
stcb->asoc.scope.ipv4_local_scope = 0;
LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
/* wrong base scope */
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
struct sctp_ifa *tmp_sifa;
if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0))
tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
if (tmp_sifa == NULL) {
if (tmp_sifa == sifa) {
if (sctp_is_address_in_scope(tmp_sifa,
&stcb->asoc.scope, 0) == 0) {
if (((non_asoc_addr_ok == 0) &&
(sctp_is_addr_restricted(stcb, tmp_sifa))) ||
(non_asoc_addr_ok &&
(sctp_is_addr_restricted(stcb, tmp_sifa)) &&
(!sctp_is_addr_pending(stcb, tmp_sifa)))) {
/*
 * It is restricted for some
 * reason.. probably not yet added.
 */
if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
(IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
sctp_add_local_addr_restricted(stcb, tmp_sifa);
atomic_add_int(&sifa->refcount, 1);
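/*
 * Editor's summary of the bound-all cascade above (descriptive only):
 * Plan A takes the nth preferred address on the interface the route
 * emits on; Plan B looks for a preferred address on any other interface;
 * Plan C falls back to a merely acceptable address on the emit
 * interface; Plan D scans every interface for any acceptable address,
 * retrying once with private (RFC 1918) addresses allowed before
 * giving up.
 */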
/* tcb may be NULL */
sctp_source_address_selection(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
struct sctp_nets *net,
int non_asoc_addr_ok, uint32_t vrf_id)
struct sctp_ifa *answer;
uint8_t dest_is_priv, dest_is_loop;
struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
/*
 * Rules: - Find the route if needed, cache if I can. - Look at
 * interface address in route: is it in the bound list? If so we
 * have the best source. - If not we must rotate amongst the
 * addresses.
 *
 * Do we need to pay attention to scope? We can have a private address
 * or a global address we are sourcing or sending to. So if we draw
 * a table:
 * ------------------------------------------
 *     source    *      dest      *  result
 * ------------------------------------------
 * <a> Private   *     Global     *  NAT
 * ------------------------------------------
 * <b> Private   *     Private    *  No problem
 * ------------------------------------------
 * <c> Global    *     Private    *  Huh, How will this work?
 * ------------------------------------------
 * <d> Global    *     Global     *  No Problem
 * ------------------------------------------
 *
 * And for V6:
 * ------------------------------------------
 *     source    *      dest      *  result
 * ------------------------------------------
 * <a> Linklocal *     Global     *
 * ------------------------------------------
 * <b> Linklocal *     Linklocal  *  No problem
 * ------------------------------------------
 * <c> Global    *     Linklocal  *  Huh, How will this work?
 * ------------------------------------------
 * <d> Global    *     Global     *  No Problem
 * ------------------------------------------
 *
 * And then we add to that what happens if there are multiple addresses
 * assigned to an interface. Remember the ifa on a ifn is a linked
 * list of addresses. So one interface can have more than one IP
 * address. What happens if we have both a private and a global
 * address? Do we then use context of destination to sort out which
 * one is best? And what about NAT's sending P->G may get you a NAT
 * translation, or should you select the G that's on the interface in
 * preference?
 *
 * Decisions:
 * - count the number of addresses on the interface.
 * - if it is one, no problem except case <c>.
 *   For <a> we will assume a NAT out there.
 * - if there are more than one, then we need to worry about scope P
 *   or G. We should prefer G -> G and P -> P if possible.
 *   Then as a secondary fall back to mixed types G->P being a last
 *   ditch one.
 * - The above all works for bound all, but bound specific we need to
 *   use the same concept but instead only consider the bound
 *   addresses. If the bound set is NOT assigned to the interface then
 *   we must use rotation amongst the bound addresses..
 */
if (ro->ro_rt == NULL) {
/*
 * Need a route to cache.
 */
SCTP_RTALLOC(ro, vrf_id);
if (ro->ro_rt == NULL) {
fam = ro->ro_dst.sa_family;
dest_is_priv = dest_is_loop = 0;
/* Setup our scopes for the destination */
/* Scope based on outbound address */
if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
/* mark it as local */
net->addr_is_local = 1;
} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
/* Scope based on outbound address */
#if defined(__Userspace_os_Windows)
if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
SCTP_ROUTE_IS_REAL_LOOP(ro)) {
/*
 * If the address is a loopback address, which
 * consists of "::1" OR "fe80::1%lo0", we are loopback
 * scope. But we don't use dest_is_priv (link local
 * addresses).
 */
/* mark it as local */
net->addr_is_local = 1;
} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
SCTP_IPI_ADDR_RLOCK();
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
dest_is_priv, dest_is_loop,
non_asoc_addr_ok, fam);
SCTP_IPI_ADDR_RUNLOCK();
answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
vrf_id, dest_is_priv,
non_asoc_addr_ok, fam);
answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
SCTP_IPI_ADDR_RUNLOCK();
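/*
 * Editor's illustration of the expected call pattern (a sketch; it
 * mirrors how the output path caches the result in net->ro._s_addr and
 * net->src_addr_selected, both of which appear later in this file):
 *
 *	if (net->src_addr_selected == 0) {
 *		net->ro._s_addr = sctp_source_address_selection(inp, stcb,
 *		    (sctp_route_t *)&net->ro, net, 0, vrf_id);
 *		net->src_addr_selected = 1;
 *	}
 */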
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
#if defined(__Userspace_os_Windows)
int tlen, at, found;
struct sctp_sndinfo sndinfo;
struct sctp_prinfo prinfo;
struct sctp_authinfo authinfo;
tlen = SCTP_BUF_LEN(control);
/*
 * Independent of how many mbufs, find the c_type inside the control
 * structure and copy out the data.
 */
if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
/* There is not enough room for one more. */
m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
/* We don't have a complete CMSG header. */
if (((int)cmh.cmsg_len + at) > tlen) {
/* We don't have the complete CMSG. */
if ((cmh.cmsg_level == IPPROTO_SCTP) &&
((c_type == cmh.cmsg_type) ||
((c_type == SCTP_SNDRCV) &&
((cmh.cmsg_type == SCTP_SNDINFO) ||
(cmh.cmsg_type == SCTP_PRINFO) ||
(cmh.cmsg_type == SCTP_AUTHINFO))))) {
if (c_type == cmh.cmsg_type) {
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
/* It is exactly what we want. Copy it out. */
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
struct sctp_sndrcvinfo *sndrcvinfo;
sndrcvinfo = (struct sctp_sndrcvinfo *)data;
if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
switch (cmh.cmsg_type) {
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
sndrcvinfo->sinfo_context = sndinfo.snd_context;
sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
sndrcvinfo->sinfo_timetolive = 0;
sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
sndrcvinfo->sinfo_keynumber_valid = 1;
sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
at += CMSG_ALIGN(cmh.cmsg_len);
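/*
 * Editor's usage sketch (hypothetical userland snippet, built only from
 * the sctp_sndinfo fields referenced above): attaching an SCTP_SNDINFO
 * cmsg that this parser would find.
 *
 *	char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *	struct cmsghdr *cmsg = (struct cmsghdr *)buf;
 *	struct sctp_sndinfo *info = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDINFO;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	info->snd_sid    = 1;
 *	info->snd_ppid   = htonl(1234);
 *
 * The buffer is then handed to sendmsg() via msg_control/msg_controllen.
 */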
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
#if defined(__Userspace_os_Windows)
struct sctp_initmsg initmsg;
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
tlen = SCTP_BUF_LEN(control);
if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
/* There is not enough room for one more. */
m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
/* We don't have a complete CMSG header. */
if (((int)cmh.cmsg_len + at) > tlen) {
/* We don't have the complete CMSG. */
if (cmh.cmsg_level == IPPROTO_SCTP) {
switch (cmh.cmsg_type) {
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
if (initmsg.sinit_max_attempts)
stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
if (initmsg.sinit_num_ostreams)
stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
if (initmsg.sinit_max_instreams)
stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
if (initmsg.sinit_max_init_timeo)
stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
struct sctp_stream_out *tmp_str;
/* Default is NOT correct */
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
SCTP_TCB_UNLOCK(stcb);
SCTP_MALLOC(tmp_str,
struct sctp_stream_out *,
(stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
SCTP_TCB_LOCK(stcb);
if (tmp_str != NULL) {
SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
stcb->asoc.strmout = tmp_str;
stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
stcb->asoc.strmout[i].chunks_on_queues = 0;
stcb->asoc.strmout[i].next_sequence_send = 0;
stcb->asoc.strmout[i].stream_no = i;
stcb->asoc.strmout[i].last_msg_incomplete = 0;
stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
case SCTP_DSTADDRV4:
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
memset(&sin, 0, sizeof(struct sockaddr_in));
sin.sin_family = AF_INET;
sin.sin_len = sizeof(struct sockaddr_in);
sin.sin_port = stcb->rport;
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
if ((sin.sin_addr.s_addr == INADDR_ANY) ||
(sin.sin_addr.s_addr == INADDR_BROADCAST) ||
IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
case SCTP_DSTADDRV6:
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
memset(&sin6, 0, sizeof(struct sockaddr_in6));
sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
sin6.sin6_len = sizeof(struct sockaddr_in6);
sin6.sin6_port = stcb->rport;
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
in6_sin6_2_sin(&sin, &sin6);
if ((sin.sin_addr.s_addr == INADDR_ANY) ||
(sin.sin_addr.s_addr == INADDR_BROADCAST) ||
IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
at += CMSG_ALIGN(cmh.cmsg_len);
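/*
 * Editor's note (sketch, hypothetical userland snippet): the
 * SCTP_DSTADDRV4/SCTP_DSTADDRV6 cases above expect a raw struct in_addr
 * or struct in6_addr as the cmsg payload, e.g. for IPv4:
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_DSTADDRV4;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct in_addr));
 *	memcpy(CMSG_DATA(cmsg), &dst.sin_addr, sizeof(struct in_addr));
 */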
static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
struct mbuf *control,
struct sctp_nets **net_p,
#if defined(__Userspace_os_Windows)
struct sctp_tcb *stcb;
struct sockaddr *addr;
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
tlen = SCTP_BUF_LEN(control);
if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
/* There is not enough room for one more. */
m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
/* We don't have a complete CMSG header. */
if (((int)cmh.cmsg_len + at) > tlen) {
/* We don't have the complete CMSG. */
if (cmh.cmsg_level == IPPROTO_SCTP) {
switch (cmh.cmsg_type) {
case SCTP_DSTADDRV4:
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
memset(&sin, 0, sizeof(struct sockaddr_in));
sin.sin_family = AF_INET;
sin.sin_len = sizeof(struct sockaddr_in);
sin.sin_port = port;
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
addr = (struct sockaddr *)&sin;
case SCTP_DSTADDRV6:
if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
memset(&sin6, 0, sizeof(struct sockaddr_in6));
sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
sin6.sin6_len = sizeof(struct sockaddr_in6);
sin6.sin6_port = port;
m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
in6_sin6_2_sin(&sin, &sin6);
addr = (struct sockaddr *)&sin;
addr = (struct sockaddr *)&sin6;
stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
at += CMSG_ALIGN(cmh.cmsg_len);
static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
struct sctp_state_cookie *stc;
struct sctp_paramhdr *ph;
mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
sizeof(struct sctp_paramhdr)), 0,
M_NOWAIT, 1, MT_DATA);
copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
if (copy_init == NULL) {
#ifdef SCTP_MBUF_LOGGING
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
if (copy_initack == NULL) {
sctp_m_freem(copy_init);
#ifdef SCTP_MBUF_LOGGING
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
sctp_log_mb(mat, SCTP_MBUF_ICOPY);
/* easy side we just drop it on the end */
ph = mtod(mret, struct sctp_paramhdr *);
SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
sizeof(struct sctp_paramhdr);
stc = (struct sctp_state_cookie *)((caddr_t)ph +
sizeof(struct sctp_paramhdr));
ph->param_type = htons(SCTP_STATE_COOKIE);
ph->param_length = 0;	/* fill in at the end */
/* Fill in the stc cookie data */
memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
/* tack the INIT and then the INIT-ACK onto the chain */
for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
cookie_sz += SCTP_BUF_LEN(m_at);
if (SCTP_BUF_NEXT(m_at) == NULL) {
SCTP_BUF_NEXT(m_at) = copy_init;
for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
cookie_sz += SCTP_BUF_LEN(m_at);
if (SCTP_BUF_NEXT(m_at) == NULL) {
SCTP_BUF_NEXT(m_at) = copy_initack;
for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
cookie_sz += SCTP_BUF_LEN(m_at);
if (SCTP_BUF_NEXT(m_at) == NULL) {
sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
/* no space, so free the entire chain */
SCTP_BUF_LEN(sig) = 0;
SCTP_BUF_NEXT(m_at) = sig;
foo = (uint8_t *)(mtod(sig, caddr_t) + sig_offset);
memset(foo, 0, SCTP_SIGNATURE_SIZE);
SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
cookie_sz += SCTP_SIGNATURE_SIZE;
ph->param_length = htons(cookie_sz);
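/*
 * Editor's note on the resulting layout (derived from the code above):
 * mret now carries a single STATE-COOKIE parameter whose value is
 *
 *	struct sctp_paramhdr      (type SCTP_STATE_COOKIE, length = cookie_sz)
 *	struct sctp_state_cookie  (copy of *stc_in)
 *	copy of the received INIT chunk
 *	copy of the sent INIT-ACK chunk
 *	SCTP_SIGNATURE_SIZE bytes of signature, zeroed here and filled in
 *	by the caller, which receives a pointer to it through *signature.
 */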
sctp_get_ect(struct sctp_tcb *stcb)
if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
return (SCTP_ECT0_BIT);
#if defined(INET) || defined(INET6)
sctp_handle_no_route(struct sctp_tcb *stcb,
struct sctp_nets *net,
SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
if (net->dest_state & SCTP_ADDR_CONFIRMED) {
if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
net->dest_state &= ~SCTP_ADDR_REACHABLE;
net->dest_state &= ~SCTP_ADDR_PF;
if (net == stcb->asoc.primary_destination) {
/* need a new primary */
struct sctp_nets *alt;
alt = sctp_find_alternate_net(stcb, net, 0);
if (stcb->asoc.alternate) {
sctp_free_remote_addr(stcb->asoc.alternate);
stcb->asoc.alternate = alt;
atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
if (net->ro._s_addr) {
sctp_free_ifa(net->ro._s_addr);
net->ro._s_addr = NULL;
net->src_addr_selected = 0;
3895 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3896 struct sctp_tcb *stcb, /* may be NULL */
3897 struct sctp_nets *net,
3898 struct sockaddr *to,
3900 uint32_t auth_offset,
3901 struct sctp_auth_chunk *auth,
3902 uint16_t auth_keyid,
3903 int nofragment_flag,
3910 union sctp_sockstore *over_addr,
3911 #if defined(__FreeBSD__)
3912 uint8_t use_mflowid, uint32_t mflowid,
3914 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3915 int so_locked SCTP_UNUSED
3920 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3923 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3924 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3925 * - fill in the HMAC digest of any AUTH chunk in the packet.
3926 * - calculate and fill in the SCTP checksum.
3927 * - prepend an IP address header.
3928 * - if boundall use INADDR_ANY.
3929 * - if boundspecific do source address selection.
3930	 * - set fragmentation option for IPv4.
3931 * - On return from IP output, check/adjust mtu size of output
3932 * interface and smallest_mtu size as well.
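 *
 * Layout sketch (added for orientation): the chain handed to IP output
 * ends up as
 *
 *   [IPv4/IPv6 header][struct udphdr, only if port != 0][struct sctphdr][chunks ...]
 *
 * where the UDP header is present only for UDP-encapsulated SCTP.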
3934 /* Will need ifdefs around this */
3936 pakhandle_type o_pak;
3939 struct sctphdr *sctphdr;
3942 #if defined(INET) || defined(INET6)
3945 #if defined(INET) || defined(INET6)
3946 #if !defined(__Panda__)
3949 sctp_route_t *ro = NULL;
3950 struct udphdr *udp = NULL;
3953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3954 struct socket *so = NULL;
3957 #if defined(__APPLE__)
3959 sctp_lock_assert(SCTP_INP_SO(inp));
3960 SCTP_TCB_LOCK_ASSERT(stcb);
3962 sctp_unlock_assert(SCTP_INP_SO(inp));
3965 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3966 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3970 #if defined(INET) || defined(INET6)
3972 vrf_id = stcb->asoc.vrf_id;
3974 vrf_id = inp->def_vrf_id;
3977 /* fill in the HMAC digest for any AUTH chunk in the packet */
3978 if ((auth != NULL) && (stcb != NULL)) {
3979 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3983 tos_value = net->dscp;
3985 tos_value = stcb->asoc.default_dscp;
3987 tos_value = inp->sctp_ep.default_dscp;
3990 switch (to->sa_family) {
3994 struct ip *ip = NULL;
3995 sctp_route_t iproute;
3998 len = sizeof(struct ip) + sizeof(struct sctphdr);
4000 len += sizeof(struct udphdr);
4002 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4005 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4008 SCTP_ALIGN_TO_END(newm, len);
4009 SCTP_BUF_LEN(newm) = len;
4010 SCTP_BUF_NEXT(newm) = m;
4012 #if defined(__FreeBSD__)
4015 if (net->flowidset == 0) {
4016 panic("Flow ID not set");
4019 m->m_pkthdr.flowid = net->flowid;
4020 m->m_flags |= M_FLOWID;
4022 if (use_mflowid != 0) {
4023 m->m_pkthdr.flowid = mflowid;
4024 m->m_flags |= M_FLOWID;
4028 packet_length = sctp_calculate_len(m);
4029 ip = mtod(m, struct ip *);
4030 ip->ip_v = IPVERSION;
4031 ip->ip_hl = (sizeof(struct ip) >> 2);
4032 if (tos_value == 0) {
4034			 * This means, in particular, that it is not set at the
4035			 * SCTP layer, so use the value from the IP layer.
4037 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4038 tos_value = inp->ip_inp.inp.inp_ip_tos;
4040 tos_value = inp->inp_ip_tos;
4045 tos_value |= sctp_get_ect(stcb);
4047 if ((nofragment_flag) && (port == 0)) {
4048 #if defined(__FreeBSD__)
4049 #if __FreeBSD_version >= 1000000
4050 ip->ip_off = htons(IP_DF);
4054 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__)
4057 ip->ip_off = htons(IP_DF);
4060 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4061 ip->ip_off = htons(0);
4066 #if defined(__FreeBSD__)
4067			/* FreeBSD has a function for ip_ids */
4068 ip->ip_id = ip_newid();
4069 #elif defined(RANDOM_IP_ID)
4070 /* Apple has RANDOM_IP_ID switch */
4071 ip->ip_id = htons(ip_randomid());
4072 #elif defined(__Userspace__)
4073 ip->ip_id = htons(SCTP_IP_ID(inp)++);
4075 ip->ip_id = SCTP_IP_ID(inp)++;
4078 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4079 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4081 ip->ip_ttl = inp->inp_ip_ttl;
4083 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4084 ip->ip_len = htons(packet_length);
4086 ip->ip_len = packet_length;
4088 ip->ip_tos = tos_value;
4090 ip->ip_p = IPPROTO_UDP;
4092 ip->ip_p = IPPROTO_SCTP;
4097 memset(&iproute, 0, sizeof(iproute));
4099 memcpy(&ro->ro_dst, to, to->sa_len);
4101 memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4104 ro = (sctp_route_t *)&net->ro;
4106 /* Now the address selection part */
4107 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4109 /* call the routine to select the src address */
4110 if (net && out_of_asoc_ok == 0) {
4111 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4112 sctp_free_ifa(net->ro._s_addr);
4113 net->ro._s_addr = NULL;
4114 net->src_addr_selected = 0;
4120 if (net->src_addr_selected == 0) {
4121 /* Cache the source address */
4122 net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4125 net->src_addr_selected = 1;
4127 if (net->ro._s_addr == NULL) {
4128 /* No route to host */
4129 net->src_addr_selected = 0;
4130 sctp_handle_no_route(stcb, net, so_locked);
4131 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4133 return (EHOSTUNREACH);
4135 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4137 if (over_addr == NULL) {
4138 struct sctp_ifa *_lsrc;
4140 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4144 if (_lsrc == NULL) {
4145 sctp_handle_no_route(stcb, net, so_locked);
4146 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4148 return (EHOSTUNREACH);
4150 ip->ip_src = _lsrc->address.sin.sin_addr;
4151 sctp_free_ifa(_lsrc);
4153 ip->ip_src = over_addr->sin.sin_addr;
4154 SCTP_RTALLOC(ro, vrf_id);
4158 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4159 sctp_handle_no_route(stcb, net, so_locked);
4160 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4162 return (EHOSTUNREACH);
4164 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4165 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4166 udp->uh_dport = port;
4167 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4168 #if !defined(__Windows__) && !defined(__Userspace__)
4169 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4171 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4176 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4181 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4183 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4186 sctphdr->src_port = src_port;
4187 sctphdr->dest_port = dest_port;
4188 sctphdr->v_tag = v_tag;
4189 sctphdr->checksum = 0;
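/*
 * For reference (sketch; see sctp_header.h for the authoritative
 * definition), the four assignments above fill the 12-byte SCTP common
 * header of RFC 4960:
 *
 *   struct sctphdr {
 *           uint16_t src_port;
 *           uint16_t dest_port;
 *           uint32_t v_tag;     /. verification tag ./
 *           uint32_t checksum;  /. CRC32c, computed further below ./
 *   };
 */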
4192 * If source address selection fails and we find no route
4193 * then the ip_output should fail as well with a
4194 * NO_ROUTE_TO_HOST type error. We probably should catch
4195 * that somewhere and abort the association right away
4196 * (assuming this is an INIT being sent).
4198 if (ro->ro_rt == NULL) {
4200 * src addr selection failed to find a route (or
4201 * valid source addr), so we can't get there from
4204 sctp_handle_no_route(stcb, net, so_locked);
4205 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4207 return (EHOSTUNREACH);
4209 if (ro != &iproute) {
4210 memcpy(&iproute, ro, sizeof(*ro));
4212 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4213 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4214 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4215 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4216 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4219 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4220 /* failed to prepend data, give up */
4221 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4225 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4227 #if defined(SCTP_WITH_NO_CSUM)
4228 SCTP_STAT_INCR(sctps_sendnocrc);
4230 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4231 SCTP_STAT_INCR(sctps_sendswcrc);
4233 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4235 SCTP_ENABLE_UDP_CSUM(o_pak);
4238 SCTP_ENABLE_UDP_CSUM(o_pak);
4241 #if defined(SCTP_WITH_NO_CSUM)
4242 SCTP_STAT_INCR(sctps_sendnocrc);
4244 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4245 m->m_pkthdr.csum_flags = CSUM_SCTP;
4246 m->m_pkthdr.csum_data = 0;
4247 SCTP_STAT_INCR(sctps_sendhwcrc);
4249 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4250 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4251 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4252 SCTP_STAT_INCR(sctps_sendswcrc);
4254 SCTP_STAT_INCR(sctps_sendnocrc);
4259 #ifdef SCTP_PACKET_LOGGING
4260 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4261 sctp_packet_log(o_pak);
4263 /* send it out. table id is taken from stcb */
4264 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4265 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4266 so = SCTP_INP_SO(inp);
4267 SCTP_SOCKET_UNLOCK(so, 0);
4270 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4271 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4272 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4273 atomic_add_int(&stcb->asoc.refcnt, 1);
4274 SCTP_TCB_UNLOCK(stcb);
4275 SCTP_SOCKET_LOCK(so, 0);
4276 SCTP_TCB_LOCK(stcb);
4277 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4280 SCTP_STAT_INCR(sctps_sendpackets);
4281 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4283 SCTP_STAT_INCR(sctps_senderrors);
4285 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4287		/* free temp routes */
4288 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
4297 /* PMTU check versus smallest asoc MTU goes here */
4298 if ((ro->ro_rt != NULL) &&
4299 (net->ro._s_addr)) {
4301 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4303 mtu -= sizeof(struct udphdr);
4305 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4306 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4309 } else if (ro->ro_rt == NULL) {
4310 /* route was freed */
4311 if (net->ro._s_addr &&
4312 net->src_addr_selected) {
4313 sctp_free_ifa(net->ro._s_addr);
4314 net->ro._s_addr = NULL;
4316 net->src_addr_selected = 0;
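/*
 * Worked example (illustrative numbers): if the route reports an MTU of
 * 1500 and UDP encapsulation is in use, the usable MTU becomes
 * 1500 - sizeof(struct udphdr) = 1492; if the association's
 * smallest_mtu was still 1500, sctp_mtu_size_reset() above clamps it.
 */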
4325 uint32_t flowlabel, flowinfo;
4326 struct ip6_hdr *ip6h;
4327 struct route_in6 ip6route;
4328 #if !(defined(__Panda__) || defined(__Userspace__))
4331 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4333 #ifdef SCTP_EMBEDDED_V6_SCOPE
4334 struct sockaddr_in6 lsa6_storage;
4337 u_short prev_port = 0;
4341 flowlabel = net->flowlabel;
4343 flowlabel = stcb->asoc.default_flowlabel;
4345 flowlabel = inp->sctp_ep.default_flowlabel;
4347 if (flowlabel == 0) {
4349			 * This means, in particular, that it is not set at the
4350			 * SCTP layer, so use the value from the IP layer.
4352 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4354 flowlabel &= 0x000fffff;
4355 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4357 len += sizeof(struct udphdr);
4359 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4362 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4365 SCTP_ALIGN_TO_END(newm, len);
4366 SCTP_BUF_LEN(newm) = len;
4367 SCTP_BUF_NEXT(newm) = m;
4369 #if defined(__FreeBSD__)
4372 if (net->flowidset == 0) {
4373 panic("Flow ID not set");
4376 m->m_pkthdr.flowid = net->flowid;
4377 m->m_flags |= M_FLOWID;
4379 if (use_mflowid != 0) {
4380 m->m_pkthdr.flowid = mflowid;
4381 m->m_flags |= M_FLOWID;
4385 packet_length = sctp_calculate_len(m);
4387 ip6h = mtod(m, struct ip6_hdr *);
4388 /* protect *sin6 from overwrite */
4389 sin6 = (struct sockaddr_in6 *)to;
4393 #ifdef SCTP_EMBEDDED_V6_SCOPE
4394 /* KAME hack: embed scopeid */
4395 #if defined(__APPLE__)
4396 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4397 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4399 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4401 #elif defined(SCTP_KAME)
4402 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4404 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4407 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4410 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4412 memset(&ip6route, 0, sizeof(ip6route));
4413 ro = (sctp_route_t *)&ip6route;
4414 #ifdef HAVE_SIN6_LEN
4415 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4417 memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4420 ro = (sctp_route_t *)&net->ro;
4423 * We assume here that inp_flow is in host byte order within
4426 if (tos_value == 0) {
4428			 * This means, in particular, that it is not set at the
4429			 * SCTP layer, so use the value from the IP layer.
4431 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4432 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4437 tos_value |= sctp_get_ect(stcb);
4441 flowinfo |= tos_value;
4443 flowinfo |= flowlabel;
4444 ip6h->ip6_flow = htonl(flowinfo);
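/*
 * Sketch of the 32-bit flowinfo word built above (standard IPv6
 * ip6_flow layout, host byte order before the htonl()):
 *
 *   bits 28..31: version (6)
 *   bits 20..27: traffic class (tos_value, including the ECT bit)
 *   bits  0..19: flow label (flowlabel, masked to 0x000fffff earlier)
 */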
4446 ip6h->ip6_nxt = IPPROTO_UDP;
4448 ip6h->ip6_nxt = IPPROTO_SCTP;
4450 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4451 ip6h->ip6_dst = sin6->sin6_addr;
4454		 * Add SRC address selection here: we can only reuse the
4455		 * KAME src-addr-sel to a limited degree, since we can try
4456		 * its selection, but the result may not be bound.
4458 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4459 lsa6_tmp.sin6_family = AF_INET6;
4460 #ifdef HAVE_SIN6_LEN
4461 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4464 if (net && out_of_asoc_ok == 0) {
4465 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4466 sctp_free_ifa(net->ro._s_addr);
4467 net->ro._s_addr = NULL;
4468 net->src_addr_selected = 0;
4474 if (net->src_addr_selected == 0) {
4475 #ifdef SCTP_EMBEDDED_V6_SCOPE
4476 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4477 /* KAME hack: embed scopeid */
4478 #if defined(__APPLE__)
4479 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4480 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4482 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4484 #elif defined(SCTP_KAME)
4485 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4487 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4490 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4493 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4494 /* Cache the source address */
4495 net->ro._s_addr = sctp_source_address_selection(inp,
4501 #ifdef SCTP_EMBEDDED_V6_SCOPE
4503 (void)sa6_recoverscope(sin6);
4505 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4506 #endif /* SCTP_KAME */
4507 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4508 net->src_addr_selected = 1;
4510 if (net->ro._s_addr == NULL) {
4511 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4512 net->src_addr_selected = 0;
4513 sctp_handle_no_route(stcb, net, so_locked);
4514 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4516 return (EHOSTUNREACH);
4518 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4520 #ifdef SCTP_EMBEDDED_V6_SCOPE
4521 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4522 /* KAME hack: embed scopeid */
4523 #if defined(__APPLE__)
4524 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4525 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4527 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4529 #elif defined(SCTP_KAME)
4530 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4532 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4535 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4538 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4539 if (over_addr == NULL) {
4540 struct sctp_ifa *_lsrc;
4542 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4546 if (_lsrc == NULL) {
4547 sctp_handle_no_route(stcb, net, so_locked);
4548 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4550 return (EHOSTUNREACH);
4552 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4553 sctp_free_ifa(_lsrc);
4555 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4556 SCTP_RTALLOC(ro, vrf_id);
4558 #ifdef SCTP_EMBEDDED_V6_SCOPE
4560 (void)sa6_recoverscope(sin6);
4562 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4563 #endif /* SCTP_KAME */
4564 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4566 lsa6->sin6_port = inp->sctp_lport;
4568 if (ro->ro_rt == NULL) {
4570 * src addr selection failed to find a route (or
4571 * valid source addr), so we can't get there from
4574 sctp_handle_no_route(stcb, net, so_locked);
4575 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4577 return (EHOSTUNREACH);
4579 #ifndef SCOPEDROUTING
4580 #ifdef SCTP_EMBEDDED_V6_SCOPE
4582 * XXX: sa6 may not have a valid sin6_scope_id in the
4583 * non-SCOPEDROUTING case.
4585 bzero(&lsa6_storage, sizeof(lsa6_storage));
4586 lsa6_storage.sin6_family = AF_INET6;
4587 #ifdef HAVE_SIN6_LEN
4588 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4591 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4592 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4594 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4596 #endif /* SCTP_KAME */
4597 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4602 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4603 lsa6_storage.sin6_port = inp->sctp_lport;
4604 lsa6 = &lsa6_storage;
4605 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4606 #endif /* SCOPEDROUTING */
4607 ip6h->ip6_src = lsa6->sin6_addr;
4610 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4611 sctp_handle_no_route(stcb, net, so_locked);
4612 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4614 return (EHOSTUNREACH);
4616 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4617 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4618 udp->uh_dport = port;
4619 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4621 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4623 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4626 sctphdr->src_port = src_port;
4627 sctphdr->dest_port = dest_port;
4628 sctphdr->v_tag = v_tag;
4629 sctphdr->checksum = 0;
4632 * We set the hop limit now since there is a good chance
4633 * that our ro pointer is now filled
4635 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4636 #if !(defined(__Panda__) || defined(__Userspace__))
4637 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4641		/* Copy back so the debug output below shows what is actually in the header */
4642 sin6->sin6_addr = ip6h->ip6_dst;
4643 lsa6->sin6_addr = ip6h->ip6_src;
4646 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4647 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4648 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4649 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4650 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4652 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4653 /* preserve the port and scope for link local send */
4654 prev_scope = sin6->sin6_scope_id;
4655 prev_port = sin6->sin6_port;
4658 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4659 /* failed to prepend data, give up */
4661 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4664 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4666 #if defined(SCTP_WITH_NO_CSUM)
4667 SCTP_STAT_INCR(sctps_sendnocrc);
4669 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4670 SCTP_STAT_INCR(sctps_sendswcrc);
4672 #if defined(__Windows__)
4674 #elif !defined(__Userspace__)
4675 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4676 udp->uh_sum = 0xffff;
4680 #if defined(SCTP_WITH_NO_CSUM)
4681 SCTP_STAT_INCR(sctps_sendnocrc);
4683 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4684 #if __FreeBSD_version < 900000
4685 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4686 SCTP_STAT_INCR(sctps_sendswcrc);
4688 #if __FreeBSD_version > 901000
4689 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4691 m->m_pkthdr.csum_flags = CSUM_SCTP;
4693 m->m_pkthdr.csum_data = 0;
4694 SCTP_STAT_INCR(sctps_sendhwcrc);
4697 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4698 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4699 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4700 SCTP_STAT_INCR(sctps_sendswcrc);
4702 SCTP_STAT_INCR(sctps_sendnocrc);
4707 /* send it out. table id is taken from stcb */
4708 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4709 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4710 so = SCTP_INP_SO(inp);
4711 SCTP_SOCKET_UNLOCK(so, 0);
4714 #ifdef SCTP_PACKET_LOGGING
4715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4716 sctp_packet_log(o_pak);
4718 #if !(defined(__Panda__) || defined(__Userspace__))
4719 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4721 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4723 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4724 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4725 atomic_add_int(&stcb->asoc.refcnt, 1);
4726 SCTP_TCB_UNLOCK(stcb);
4727 SCTP_SOCKET_LOCK(so, 0);
4728 SCTP_TCB_LOCK(stcb);
4729 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4733 /* for link local this must be done */
4734 sin6->sin6_scope_id = prev_scope;
4735 sin6->sin6_port = prev_port;
4737 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4738 SCTP_STAT_INCR(sctps_sendpackets);
4739 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4741 SCTP_STAT_INCR(sctps_senderrors);
4744		/* Now, if we had a temp route, free it */
4745 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
4754 /* PMTU check versus smallest asoc MTU goes here */
4755 if (ro->ro_rt == NULL) {
4756 /* Route was freed */
4757 if (net->ro._s_addr &&
4758 net->src_addr_selected) {
4759 sctp_free_ifa(net->ro._s_addr);
4760 net->ro._s_addr = NULL;
4762 net->src_addr_selected = 0;
4764 if ((ro->ro_rt != NULL) &&
4765 (net->ro._s_addr)) {
4767 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4769 (stcb->asoc.smallest_mtu > mtu)) {
4770 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4773 net->mtu -= sizeof(struct udphdr);
4777 #if !defined(__Panda__) && !defined(__Userspace__)
4779 #if defined(__Windows__)
4780 #define ND_IFINFO(ifp) (ifp)
4781 #define linkmtu if_mtu
4783 if (ND_IFINFO(ifp)->linkmtu &&
4784 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4785 sctp_mtu_size_reset(inp,
4787 ND_IFINFO(ifp)->linkmtu);
4795 #if defined(__Userspace__)
4799 struct sockaddr_conn *sconn;
4802 sconn = (struct sockaddr_conn *)to;
4803 len = sizeof(struct sctphdr);
4804 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4807 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4810 SCTP_ALIGN_TO_END(newm, len);
4811 SCTP_BUF_LEN(newm) = len;
4812 SCTP_BUF_NEXT(newm) = m;
4814 packet_length = sctp_calculate_len(m);
4815 sctphdr = mtod(m, struct sctphdr *);
4816 sctphdr->src_port = src_port;
4817 sctphdr->dest_port = dest_port;
4818 sctphdr->v_tag = v_tag;
4819 sctphdr->checksum = 0;
4820 #if defined(SCTP_WITH_NO_CSUM)
4821 SCTP_STAT_INCR(sctps_sendnocrc);
4823 sctphdr->checksum = sctp_calculate_cksum(m, 0);
4824 SCTP_STAT_INCR(sctps_sendswcrc);
4826 if (tos_value == 0) {
4827 tos_value = inp->ip_inp.inp.inp_ip_tos;
4831 tos_value |= sctp_get_ect(stcb);
4833 /* Don't alloc/free for each packet */
4834 if ((buffer = malloc(packet_length)) != NULL) {
4835 m_copydata(m, 0, packet_length, buffer);
4836 ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
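/*
 * Illustrative callback shape (an assumption matching the call above,
 * not a definitive prototype): a userspace consumer registers something
 * like
 *
 *   int my_conn_output(void *addr, void *buf, size_t length,
 *                      uint8_t tos, uint8_t set_df)
 *   {
 *           /. hand the flat, already-checksummed SCTP packet to the
 *              lower transport; return 0 on success, an errno otherwise ./
 *           return (0);
 *   }
 */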
4846 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4847 ((struct sockaddr *)to)->sa_family);
4849 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4856 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4857 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4863 struct sctp_nets *net;
4864 struct sctp_init_chunk *init;
4865 struct sctp_supported_addr_param *sup_addr;
4866 struct sctp_adaptation_layer_indication *ali;
4867 struct sctp_supported_chunk_types_param *pr_supported;
4868 struct sctp_paramhdr *ph;
4869 int cnt_inits_to = 0;
4871 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4873 #if defined(__APPLE__)
4875 sctp_lock_assert(SCTP_INP_SO(inp));
4877 sctp_unlock_assert(SCTP_INP_SO(inp));
4880	/* INITs always go to the primary (and usually the ONLY) address */
4881 net = stcb->asoc.primary_destination;
4883 net = TAILQ_FIRST(&stcb->asoc.nets);
4888 /* we confirm any address we send an INIT to */
4889 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4890 (void)sctp_set_primary_addr(stcb, NULL, net);
4892 /* we confirm any address we send an INIT to */
4893 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4895 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4897 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4899		 * special hook: if we are sending to a link-local address, it
4900		 * will not show up in our private address count.
4902 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4906 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4907 /* This case should not happen */
4908 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4911 /* start the INIT timer */
4912 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4914 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4916 /* No memory, INIT timer will re-attempt. */
4917 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4920 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
4923 * assume peer supports asconf in order to be able to queue
4924 * local address changes while an INIT is in flight and before
4925 * the assoc is established.
4927 stcb->asoc.peer_supports_asconf = 1;
4928	/* Now let's put the chunk header in place */
4929 init = mtod(m, struct sctp_init_chunk *);
4930 /* now the chunk header */
4931 init->ch.chunk_type = SCTP_INITIATION;
4932 init->ch.chunk_flags = 0;
4933 /* fill in later from mbuf we build */
4934 init->ch.chunk_length = 0;
4935 /* place in my tag */
4936 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4937 /* set up some of the credits. */
4938 init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
4939 SCTP_MINIMAL_RWND));
4940 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4941 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4942 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4944 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4947 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4948 if (stcb->asoc.scope.ipv4_addr_legal) {
4949 parameter_len += (uint16_t)sizeof(uint16_t);
4951 if (stcb->asoc.scope.ipv6_addr_legal) {
4952 parameter_len += (uint16_t)sizeof(uint16_t);
4954 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
4955 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4956 sup_addr->ph.param_length = htons(parameter_len);
4958 if (stcb->asoc.scope.ipv4_addr_legal) {
4959 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4961 if (stcb->asoc.scope.ipv6_addr_legal) {
4962 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4964 padding_len = 4 - 2 * i;
4965 chunk_len += parameter_len;
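/*
 * Example encoding (sketch): with both IPv4 and IPv6 legal, the
 * parameter built above is 8 bytes on the wire:
 *
 *   type SCTP_SUPPORTED_ADDRTYPE, length 8,
 *   addr_type[0] = SCTP_IPV4_ADDRESS, addr_type[1] = SCTP_IPV6_ADDRESS
 *
 * With only one address type, the length is 6 and padding_len becomes 2
 * so that the next parameter still starts on a 4-byte boundary.
 */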
4968 /* Adaptation layer indication parameter */
4969 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4970 if (padding_len > 0) {
4971 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
4972 chunk_len += padding_len;
4975 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
4976 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
4977 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4978 ali->ph.param_length = htons(parameter_len);
4979 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4980 chunk_len += parameter_len;
4983 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4984 /* Add NAT friendly parameter. */
4985 if (padding_len > 0) {
4986 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
4987 chunk_len += padding_len;
4990 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4991 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
4992 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4993 ph->param_length = htons(parameter_len);
4994 chunk_len += parameter_len;
4997 /* now any cookie time extensions */
4998 if (stcb->asoc.cookie_preserve_req) {
4999 struct sctp_cookie_perserve_param *cookie_preserve;
5001 if (padding_len > 0) {
5002 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5003 chunk_len += padding_len;
5006 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5007 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5008 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5009 cookie_preserve->ph.param_length = htons(parameter_len);
5010 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5011 stcb->asoc.cookie_preserve_req = 0;
5012 chunk_len += parameter_len;
5016 if (stcb->asoc.ecn_allowed == 1) {
5017 if (padding_len > 0) {
5018 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5019 chunk_len += padding_len;
5022 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5023 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5024 ph->param_type = htons(SCTP_ECN_CAPABLE);
5025 ph->param_length = htons(parameter_len);
5026 chunk_len += parameter_len;
5029 /* And now tell the peer we do support PR-SCTP. */
5030 if (padding_len > 0) {
5031 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5032 chunk_len += padding_len;
5035 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5036 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5037 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5038 ph->param_length = htons(parameter_len);
5039 chunk_len += parameter_len;
5041 /* And now tell the peer we do all the extensions */
5042 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5043 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5045 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5046 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5047 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5048 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5049 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5050 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5051 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5053 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
5054 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5056 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5057 pr_supported->ph.param_length = htons(parameter_len);
5058 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5059 chunk_len += parameter_len;
5061 /* add authentication parameters */
5062 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5063 /* attach RANDOM parameter, if available */
5064 if (stcb->asoc.authinfo.random != NULL) {
5065 struct sctp_auth_random *randp;
5067 if (padding_len > 0) {
5068 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5069 chunk_len += padding_len;
5072 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5073 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5074 /* random key already contains the header */
5075 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5076 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5077 chunk_len += parameter_len;
5079 /* add HMAC_ALGO parameter */
5080 if ((stcb->asoc.local_hmacs != NULL) &&
5081 (stcb->asoc.local_hmacs->num_algo > 0)) {
5082 struct sctp_auth_hmac_algo *hmacs;
5084 if (padding_len > 0) {
5085 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5086 chunk_len += padding_len;
5089 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5090 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5091 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5092 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5093 hmacs->ph.param_length = htons(parameter_len);
5094 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5095 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5096 chunk_len += parameter_len;
5098 /* add CHUNKS parameter */
5099 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
5100 struct sctp_auth_chunk_list *chunks;
5102 if (padding_len > 0) {
5103 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5104 chunk_len += padding_len;
5107 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5108 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5109 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5110 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5111 chunks->ph.param_length = htons(parameter_len);
5112 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5113 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5114 chunk_len += parameter_len;
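/*
 * Note on the recurring pattern above (a restatement, not new logic):
 * each parameter is appended unpadded, and before the *next* parameter
 * is written, padding_len = SCTP_SIZE32(parameter_len) - parameter_len
 * zero bytes are emitted, so every parameter starts on a 4-byte
 * boundary as RFC 4960 requires; e.g. a 6-byte parameter is followed by
 * 2 pad bytes.
 */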
5117 SCTP_BUF_LEN(m) = chunk_len;
5119 /* now the addresses */
5120	/* To optimize this we could put the scoping stuff
5121	 * into a structure and remove the individual uint8_t's from
5122	 * the assoc structure. Then we could just fill in the
5123	 * address within the stcb. But for now this is a quick
5124	 * hack to get the address stuff teased apart.
5126 sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
5128 init->ch.chunk_length = htons(chunk_len);
5129 if (padding_len > 0) {
5130 struct mbuf *m_at, *mp_last;
5133 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
5134 if (SCTP_BUF_NEXT(m_at) == NULL)
5137 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
5142 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5143 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
5144 (struct sockaddr *)&net->ro._l_addr,
5145 m, 0, NULL, 0, 0, 0, 0,
5146 inp->sctp_lport, stcb->rport, htonl(0),
5148 #if defined(__FreeBSD__)
5152 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
5153 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5154 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5158 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5159 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
5162	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
5163	 * pointing at the beginning of the parameters (i.e. iphlen +
5164	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
5165	 * end of the mbuf, verifying that all parameters are known.
5167 * For unknown parameters build and return a mbuf with
5168 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
5169 * processing this chunk stop, and set *abort_processing to 1.
5171 * By having param_offset be pre-set to where parameters begin it is
5172 * hoped that this routine may be reused in the future by new
5175 struct sctp_paramhdr *phdr, params;
5177 struct mbuf *mat, *op_err;
5178 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
5179 int at, limit, pad_needed;
5180 uint16_t ptype, plen, padded_size;
5183 *abort_processing = 0;
5186 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5189	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized params\n");
5190	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5191 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5192 ptype = ntohs(phdr->param_type);
5193 plen = ntohs(phdr->param_length);
5194 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5195			/* whacked parameter */
5196 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5199 limit -= SCTP_SIZE32(plen);
5201		 * All parameters for all chunks that we know/understand are
5202		 * listed here. We process them in other places and take the
5203		 * appropriate stop actions per the upper bits. However, this
5204		 * is the generic routine that processors can call to get back
5205		 * an op-err to either incorporate (init-ack) or send.
5207 padded_size = SCTP_SIZE32(plen);
5209			/* Params with variable size */
5210 case SCTP_HEARTBEAT_INFO:
5211 case SCTP_STATE_COOKIE:
5212 case SCTP_UNRECOG_PARAM:
5213 case SCTP_ERROR_CAUSE_IND:
5217			/* Params with variable size within a range */
5218 case SCTP_CHUNK_LIST:
5219 case SCTP_SUPPORTED_CHUNK_EXT:
5220 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5221 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5226 case SCTP_SUPPORTED_ADDRTYPE:
5227 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5228 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5234 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5235 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5240 case SCTP_SET_PRIM_ADDR:
5241 case SCTP_DEL_IP_ADDRESS:
5242 case SCTP_ADD_IP_ADDRESS:
5243 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5244 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5245 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5250			/* Params with a fixed size */
5251 case SCTP_IPV4_ADDRESS:
5252 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5253 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5258 case SCTP_IPV6_ADDRESS:
5259 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5260 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5265 case SCTP_COOKIE_PRESERVE:
5266 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5267 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5272 case SCTP_HAS_NAT_SUPPORT:
5275 case SCTP_PRSCTP_SUPPORTED:
5277 if (padded_size != sizeof(struct sctp_paramhdr)) {
5278 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5283 case SCTP_ECN_CAPABLE:
5284 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
5285 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5290 case SCTP_ULP_ADAPTATION:
5291 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5292				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5297 case SCTP_SUCCESS_REPORT:
5298 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5299 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5304 case SCTP_HOSTNAME_ADDRESS:
5306 /* We can NOT handle HOST NAME addresses!! */
5309 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5310 *abort_processing = 1;
5311 if (op_err == NULL) {
5312				/* Ok, need to try to get an mbuf */
5314 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5316 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5319 l_len += sizeof(struct sctp_paramhdr);
5320 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5322 SCTP_BUF_LEN(op_err) = 0;
5324 * pre-reserve space for ip and sctp
5325 * header and chunk hdr
5328 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5330 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5332 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5333 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5337 /* If we have space */
5338 struct sctp_paramhdr s;
5341 uint32_t cpthis = 0;
5343 pad_needed = 4 - (err_at % 4);
5344 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5345 err_at += pad_needed;
5347 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5348 s.param_length = htons(sizeof(s) + plen);
5349 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5350 err_at += sizeof(s);
5351 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
5353 sctp_m_freem(op_err);
5355 * we are out of memory but we still
5356 * need to have a look at what to do
5357 * (the system is in trouble
5362 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5369 * we do not recognize the parameter figure out what
5372 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
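/*
 * Decision table for unknown parameter types (per RFC 4960, matching
 * the two bit tests below):
 *
 *   top two bits of ptype    action
 *   00 (0x0000)              stop processing, no report
 *   01 (0x4000)              stop processing, report in an op-err
 *   10 (0x8000)              skip this parameter, no report
 *   11 (0xC000)              skip this parameter, report in an op-err
 */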
5373 if ((ptype & 0x4000) == 0x4000) {
5374 /* Report bit is set?? */
5375 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5376 if (op_err == NULL) {
5378 /* Ok need to try to get an mbuf */
5380 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5382 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5385 l_len += sizeof(struct sctp_paramhdr);
5386 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5388 SCTP_BUF_LEN(op_err) = 0;
5390 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5392 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5394 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5395 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5399 /* If we have space */
5400 struct sctp_paramhdr s;
5403 uint32_t cpthis = 0;
5405 pad_needed = 4 - (err_at % 4);
5406 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5407 err_at += pad_needed;
5409 s.param_type = htons(SCTP_UNRECOG_PARAM);
5410 s.param_length = htons(sizeof(s) + plen);
5411 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5412 err_at += sizeof(s);
5413 if (plen > sizeof(tempbuf)) {
5414 plen = sizeof(tempbuf);
5416 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
5418 sctp_m_freem(op_err);
5420 * we are out of memory but
5421 * we still need to have a
5422 * look at what to do (the
5423 * system is in trouble
5427 goto more_processing;
5429 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5434 if ((ptype & 0x8000) == 0x0000) {
5435 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5438				/* skip this parameter and continue processing */
5439 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5440 at += SCTP_SIZE32(plen);
5445		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5449 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5450 *abort_processing = 1;
5451 if ((op_err == NULL) && phdr) {
5454 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5456 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5458 l_len += (2 * sizeof(struct sctp_paramhdr));
5459 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5461 SCTP_BUF_LEN(op_err) = 0;
5463 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5465 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5467 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5468 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5471 if ((op_err) && phdr) {
5472 struct sctp_paramhdr s;
5475 uint32_t cpthis = 0;
5477 pad_needed = 4 - (err_at % 4);
5478 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5479 err_at += pad_needed;
5481 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5482 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5483 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5484 err_at += sizeof(s);
5485 /* Only copy back the p-hdr that caused the issue */
5486 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5492 sctp_are_there_new_addresses(struct sctp_association *asoc,
5493 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5496	 * Given an INIT packet, look through the packet to verify that there
5497	 * are NO new addresses. As we go through the parameters, add reports
5498	 * of any un-understood parameters that require an error. Also we
5499	 * must return (1) to drop the packet if we see an un-understood
5500	 * parameter that tells us to drop the chunk.
5502 struct sockaddr *sa_touse;
5503 struct sockaddr *sa;
5504 struct sctp_paramhdr *phdr, params;
5505 uint16_t ptype, plen;
5507 struct sctp_nets *net;
5509 struct sockaddr_in sin4, *sa4;
5512 struct sockaddr_in6 sin6, *sa6;
5516 memset(&sin4, 0, sizeof(sin4));
5517 sin4.sin_family = AF_INET;
5519 sin4.sin_len = sizeof(sin4);
5523 memset(&sin6, 0, sizeof(sin6));
5524 sin6.sin6_family = AF_INET6;
5525 #ifdef HAVE_SIN6_LEN
5526 sin6.sin6_len = sizeof(sin6);
5529	/* First, what about the src address of the pkt? */
5531 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5532 sa = (struct sockaddr *)&net->ro._l_addr;
5533 if (sa->sa_family == src->sa_family) {
5535 if (sa->sa_family == AF_INET) {
5536 struct sockaddr_in *src4;
5538 sa4 = (struct sockaddr_in *)sa;
5539 src4 = (struct sockaddr_in *)src;
5540 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5547 if (sa->sa_family == AF_INET6) {
5548 struct sockaddr_in6 *src6;
5550 sa6 = (struct sockaddr_in6 *)sa;
5551 src6 = (struct sockaddr_in6 *)src;
5552 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5561		/* New address added! no need to look further. */
5564	/* Ok so far; let's munge through the rest of the packet */
5565 offset += sizeof(struct sctp_init_chunk);
5566	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5569 ptype = ntohs(phdr->param_type);
5570 plen = ntohs(phdr->param_length);
5573 case SCTP_IPV4_ADDRESS:
5575 struct sctp_ipv4addr_param *p4, p4_buf;
5577 phdr = sctp_get_next_param(in_initpkt, offset,
5578 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5579 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5583 p4 = (struct sctp_ipv4addr_param *)phdr;
5584 sin4.sin_addr.s_addr = p4->addr;
5585 sa_touse = (struct sockaddr *)&sin4;
5590 case SCTP_IPV6_ADDRESS:
5592 struct sctp_ipv6addr_param *p6, p6_buf;
5594 phdr = sctp_get_next_param(in_initpkt, offset,
5595 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5596 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5600 p6 = (struct sctp_ipv6addr_param *)phdr;
5601 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5603 sa_touse = (struct sockaddr *)&sin6;
5612 /* ok, sa_touse points to one to check */
5614 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5615 sa = (struct sockaddr *)&net->ro._l_addr;
5616 if (sa->sa_family != sa_touse->sa_family) {
5620 if (sa->sa_family == AF_INET) {
5621 sa4 = (struct sockaddr_in *)sa;
5622 if (sa4->sin_addr.s_addr ==
5623 sin4.sin_addr.s_addr) {
5630 if (sa->sa_family == AF_INET6) {
5631 sa6 = (struct sockaddr_in6 *)sa;
5632 if (SCTP6_ARE_ADDR_EQUAL(
5641 /* New addr added! no need to look further */
5645 offset += SCTP_SIZE32(plen);
5646		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
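/*
 * Context sketch (summary added here, not in the original source): the
 * function below implements the responder side of the four-way
 * handshake,
 *
 *   peer: INIT ---> us: INIT-ACK (carrying the state cookie built by
 *   sctp_add_cookie()) ---> peer: COOKIE-ECHO ---> us: COOKIE-ACK
 *
 * so no association state needs to be kept between INIT and COOKIE-ECHO.
 */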
5652 * Given an MBUF chain that was sent to us containing an INIT, build an
5653 * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt
5654 * has had a pullup done to include the IPv6/IPv4 header, the SCTP header
5655 * and the initial part of the INIT message (i.e. the struct sctp_init_msg).
5658 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5659 struct mbuf *init_pkt, int iphlen, int offset,
5660 struct sockaddr *src, struct sockaddr *dst,
5661 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5662 #if defined(__FreeBSD__)
5663 uint8_t use_mflowid, uint32_t mflowid,
5665 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5667 struct sctp_association *asoc;
5668 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5669 struct sctp_init_ack_chunk *initack;
5670 struct sctp_adaptation_layer_indication *ali;
5671 struct sctp_ecn_supported_param *ecn;
5672 struct sctp_prsctp_supported_param *prsctp;
5673 struct sctp_supported_chunk_types_param *pr_supported;
5674 union sctp_sockstore *over_addr;
5676 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5677 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5678 struct sockaddr_in *sin;
5681 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5682 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5683 struct sockaddr_in6 *sin6;
5685 #if defined(__Userspace__)
5686 struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5687 struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5688 struct sockaddr_conn *sconn;
5690 struct sockaddr *to;
5691 struct sctp_state_cookie stc;
5692 struct sctp_nets *net = NULL;
5693 uint8_t *signature = NULL;
5694 int cnt_inits_to = 0;
5695 uint16_t his_limit, i_want;
5696 int abort_flag, padval;
5699 int nat_friendly = 0;
5708 if ((asoc != NULL) &&
5709 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5710 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5711 /* new addresses, out of here in non-cookie-wait states */
5713		 * Send an ABORT; we don't add the new-address error clause,
5714		 * though we even set the T bit and copy in the 0 tag. This
5715		 * looks no different than if no listener was present.
5717 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, NULL,
5718 #if defined(__FreeBSD__)
5719 use_mflowid, mflowid,
5725 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5726 (offset + sizeof(struct sctp_init_chunk)),
5727 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5730 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5731 init_chk->init.initiate_tag, op_err,
5732 #if defined(__FreeBSD__)
5733 use_mflowid, mflowid,
5738 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5740 /* No memory, INIT timer will re-attempt. */
5742 sctp_m_freem(op_err);
5745 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5748 * We might not overwrite the identification[] completely and on
5749 * some platforms time_entered will contain some padding.
5750 * Therefore zero out the cookie to avoid putting
5751 * uninitialized memory on the wire.
5753 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5755 /* the time I built cookie */
5756 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5758 /* populate any tie tags */
5760 /* unlock before tag selections */
5761 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5762 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5763 stc.cookie_life = asoc->cookie_life;
5764 net = asoc->primary_destination;
5766 stc.tie_tag_my_vtag = 0;
5767 stc.tie_tag_peer_vtag = 0;
5768 /* life I will award this cookie */
5769 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5772 /* copy in the ports for later check */
5773 stc.myport = sh->dest_port;
5774 stc.peerport = sh->src_port;
5777	 * If we wanted to honor cookie life extensions, we would add to
5778 * stc.cookie_life. For now we should NOT honor any extension
5780 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5781 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5782 stc.ipv6_addr_legal = 1;
5783 if (SCTP_IPV6_V6ONLY(inp)) {
5784 stc.ipv4_addr_legal = 0;
5786 stc.ipv4_addr_legal = 1;
5788 #if defined(__Userspace__)
5789 stc.conn_addr_legal = 0;
5792 stc.ipv6_addr_legal = 0;
5793 #if defined(__Userspace__)
5794 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
5795 stc.conn_addr_legal = 1;
5796 stc.ipv4_addr_legal = 0;
5798 stc.conn_addr_legal = 0;
5799 stc.ipv4_addr_legal = 1;
5802 stc.ipv4_addr_legal = 1;
5805 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5812 switch (dst->sa_family) {
5816 /* lookup address */
5817 stc.address[0] = src4->sin_addr.s_addr;
5821 stc.addr_type = SCTP_IPV4_ADDRESS;
5822 /* local from address */
5823 stc.laddress[0] = dst4->sin_addr.s_addr;
5824 stc.laddress[1] = 0;
5825 stc.laddress[2] = 0;
5826 stc.laddress[3] = 0;
5827 stc.laddr_type = SCTP_IPV4_ADDRESS;
5828 /* scope_id is only for v6 */
5830 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5831 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
5836 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5837 /* Must use the address in this case */
5838 if (sctp_is_address_on_local_host(src, vrf_id)) {
5839 stc.loopback_scope = 1;
5842 stc.local_scope = 0;
5850 stc.addr_type = SCTP_IPV6_ADDRESS;
5851 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5852 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
5853 stc.scope_id = in6_getscope(&src6->sin6_addr);
5857 if (sctp_is_address_on_local_host(src, vrf_id)) {
5858 stc.loopback_scope = 1;
5859 stc.local_scope = 0;
5862 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
5864			 * If the new destination is LINK_LOCAL, we
5865			 * must have both site and local scope in
5866			 * common. Don't set local scope, though, since
5867 * we must depend on the source to be added
5868 * implicitly. We cannot assure just because
5869 * we share one link that all links are
5872 #if defined(__APPLE__)
5873 /* Mac OS X currently doesn't have in6_getscope() */
5874 stc.scope_id = src6->sin6_addr.s6_addr16[1];
5876 stc.local_scope = 0;
5880 * we start counting for the private address
5881			 * stuff at 1, since the link local we
5882 * source from won't show up in our scoped
5886 /* pull out the scope_id from incoming pkt */
5887 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
5889 * If the new destination is SITE_LOCAL then
5890 * we must have site scope in common.
5894 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5895 stc.laddr_type = SCTP_IPV6_ADDRESS;
5899 #if defined(__Userspace__)
5902 /* lookup address */
5907 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
5908 stc.addr_type = SCTP_CONN_ADDRESS;
5909 /* local from address */
5910 stc.laddress[0] = 0;
5911 stc.laddress[1] = 0;
5912 stc.laddress[2] = 0;
5913 stc.laddress[3] = 0;
5914 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
5915 stc.laddr_type = SCTP_CONN_ADDRESS;
5916 /* scope_id is only for v6 */
5927 /* set the scope per the existing tcb */
5930 struct sctp_nets *lnet;
5933 stc.loopback_scope = asoc->scope.loopback_scope;
5934 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5935 stc.site_scope = asoc->scope.site_scope;
5936 stc.local_scope = asoc->scope.local_scope;
5938 /* Why do we not consider IPv4 LL addresses? */
5939 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5940 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5941 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5943 * if we have a LL address, start
5951 /* use the net pointer */
5952 to = (struct sockaddr *)&net->ro._l_addr;
5953 switch (to->sa_family) {
5956 sin = (struct sockaddr_in *)to;
5957 stc.address[0] = sin->sin_addr.s_addr;
5961 stc.addr_type = SCTP_IPV4_ADDRESS;
5962 if (net->src_addr_selected == 0) {
5964 * strange case here, the INIT should have
5965			 * done the selection.
5967 net->ro._s_addr = sctp_source_address_selection(inp,
5968 stcb, (sctp_route_t *)&net->ro,
5970 if (net->ro._s_addr == NULL)
5973 net->src_addr_selected = 1;
5976 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5977 stc.laddress[1] = 0;
5978 stc.laddress[2] = 0;
5979 stc.laddress[3] = 0;
5980 stc.laddr_type = SCTP_IPV4_ADDRESS;
5981 /* scope_id is only for v6 */
5987 sin6 = (struct sockaddr_in6 *)to;
5988 memcpy(&stc.address, &sin6->sin6_addr,
5989 sizeof(struct in6_addr));
5990 stc.addr_type = SCTP_IPV6_ADDRESS;
5991 stc.scope_id = sin6->sin6_scope_id;
5992 if (net->src_addr_selected == 0) {
5994 * strange case here, the INIT should have
5995 * done the selection.
5997 net->ro._s_addr = sctp_source_address_selection(inp,
5998 stcb, (sctp_route_t *)&net->ro,
6000 if (net->ro._s_addr == NULL)
6003 net->src_addr_selected = 1;
6005 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6006 sizeof(struct in6_addr));
6007 stc.laddr_type = SCTP_IPV6_ADDRESS;
6010 #if defined(__Userspace__)
6012 sconn = (struct sockaddr_conn *)to;
6017 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6018 stc.addr_type = SCTP_CONN_ADDRESS;
6019 stc.laddress[0] = 0;
6020 stc.laddress[1] = 0;
6021 stc.laddress[2] = 0;
6022 stc.laddress[3] = 0;
6023 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6024 stc.laddr_type = SCTP_CONN_ADDRESS;
6030 /* Now lets put the SCTP header in place */
6031 initack = mtod(m, struct sctp_init_ack_chunk *);
6032 /* Save it off for quick ref */
6033 stc.peers_vtag = init_chk->init.initiate_tag;
6035 memcpy(stc.identification, SCTP_VERSION_STRING,
6036 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6037 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6038 /* now the chunk header */
6039 initack->ch.chunk_type = SCTP_INITIATION_ACK;
6040 initack->ch.chunk_flags = 0;
6041 /* fill in later from mbuf we build */
6042 initack->ch.chunk_length = 0;
6043 /* place in my tag */
6044 if ((asoc != NULL) &&
6045 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
6046 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
6047 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
6048 /* re-use the v-tags and init-seq here */
6049 initack->init.initiate_tag = htonl(asoc->my_vtag);
6050 initack->init.initial_tsn = htonl(asoc->init_seq_number);
6052 uint32_t vtag, itsn;
6053 if (hold_inp_lock) {
6054 SCTP_INP_INCR_REF(inp);
6055 SCTP_INP_RUNLOCK(inp);
6058 atomic_add_int(&asoc->refcnt, 1);
6059 SCTP_TCB_UNLOCK(stcb);
6061 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6062 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
6063 /* Got a duplicate vtag on some guy behind a NAT;
6064 * make sure we don't use it.
6068 initack->init.initiate_tag = htonl(vtag);
6069 /* get a TSN to use too */
6070 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6071 initack->init.initial_tsn = htonl(itsn);
6072 SCTP_TCB_LOCK(stcb);
6073 atomic_add_int(&asoc->refcnt, -1);
6075 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6076 initack->init.initiate_tag = htonl(vtag);
6077 /* get a TSN to use too */
6078 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6080 if (hold_inp_lock) {
6081 SCTP_INP_RLOCK(inp);
6082 SCTP_INP_DECR_REF(inp);
6085 /* save away my tag too */
6086 stc.my_vtag = initack->init.initiate_tag;
6088 /* set up some of the credits. */
6089 so = inp->sctp_socket;
6091 /* memory problem */
6095 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6097 /* set what I want */
6098 his_limit = ntohs(init_chk->init.num_inbound_streams);
6099 /* choose what I want */
6101 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
6102 i_want = asoc->streamoutcnt;
6104 i_want = inp->sctp_ep.pre_open_stream_count;
6107 i_want = inp->sctp_ep.pre_open_stream_count;
6109 if (his_limit < i_want) {
6110 /* I Want more :< */
6111 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6113 /* I can have what I want :> */
6114 initack->init.num_outbound_streams = htons(i_want);
6116 /* tell him his limit. */
6117 initack->init.num_inbound_streams =
6118 htons(inp->sctp_ep.max_open_streams_intome);
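/*
 * Editor's sketch (added; not in the original source): the negotiation
 * above follows RFC 4960 -- our outbound stream count may never exceed
 * the peer's advertised inbound limit.  With illustrative numbers:
 *
 *     his_limit = ntohs(init_chk->init.num_inbound_streams);  // e.g. 5
 *     i_want    = inp->sctp_ep.pre_open_stream_count;         // e.g. 10
 *     // his_limit < i_want, so num_outbound_streams ends up as 5;
 *     // had his_limit been 32, we would get our full 10.
 */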
6120 /* adaptation layer indication parameter */
6121 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6122 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
6123 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6124 ali->ph.param_length = htons(sizeof(*ali));
6125 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
6126 SCTP_BUF_LEN(m) += sizeof(*ali);
6127 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
6129 ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
6133 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
6134 (inp->sctp_ecn_enable == 1)) {
6135 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
6136 ecn->ph.param_length = htons(sizeof(*ecn));
6137 SCTP_BUF_LEN(m) += sizeof(*ecn);
6139 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
6142 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
6144 /* And now tell the peer we do pr-sctp */
6145 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
6146 prsctp->ph.param_length = htons(sizeof(*prsctp));
6147 SCTP_BUF_LEN(m) += sizeof(*prsctp);
6149 /* Add NAT friendly parameter */
6150 struct sctp_paramhdr *ph;
6152 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6153 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6154 ph->param_length = htons(sizeof(struct sctp_paramhdr));
6155 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
6157 /* And now tell the peer we do all the extensions */
6158 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6159 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6161 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6162 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6163 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6164 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6165 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6166 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
6167 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6168 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
6169 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6170 p_len = sizeof(*pr_supported) + num_ext;
6171 pr_supported->ph.param_length = htons(p_len);
6172 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
6173 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
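/*
 * Editor's arithmetic note (illustrative, not from the original source):
 * SCTP parameters are padded out to 4-byte boundaries, which is what the
 * SCTP_SIZE32()/bzero() pairing above implements.  Assuming the param
 * header is 4 bytes and all seven extension chunk types are listed:
 *
 *     p_len = sizeof(*pr_supported) + num_ext  ->  4 + 7 = 11
 *     SCTP_SIZE32(11) = 12
 *
 * so one pad byte is zeroed while param_length still advertises 11.
 */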
6175 /* add authentication parameters */
6176 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
6177 struct sctp_auth_random *randp;
6178 struct sctp_auth_hmac_algo *hmacs;
6179 struct sctp_auth_chunk_list *chunks;
6180 uint16_t random_len;
6182 /* generate and add RANDOM parameter */
6183 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6184 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6185 randp->ph.param_type = htons(SCTP_RANDOM);
6186 p_len = sizeof(*randp) + random_len;
6187 randp->ph.param_length = htons(p_len);
6188 SCTP_READ_RANDOM(randp->random_data, random_len);
6189 /* zero out any padding required */
6190 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
6191 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
6193 /* add HMAC_ALGO parameter */
6194 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6195 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6196 (uint8_t *) hmacs->hmac_ids);
6198 p_len += sizeof(*hmacs);
6199 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6200 hmacs->ph.param_length = htons(p_len);
6201 /* zero out any padding required */
6202 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
6203 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
6205 /* add CHUNKS parameter */
6206 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
6207 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6208 chunks->chunk_types);
6210 p_len += sizeof(*chunks);
6211 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6212 chunks->ph.param_length = htons(p_len);
6213 /* zero out any padding required */
6214 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
6215 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
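/*
 * Editor's summary (added comment, not in the original source): when AUTH
 * is enabled the INIT-ACK carries the three RFC 4895 parameters built
 * above, in order: RANDOM (SCTP_AUTH_RANDOM_SIZE_DEFAULT bytes of
 * entropy), HMAC-ALGO (the identifiers serialized from
 * inp->sctp_ep.local_hmacs), and CHUNKS (the chunk types that must be
 * authenticated).  Each is independently padded to a 4-byte boundary,
 * just like the supported-extensions parameter before it.
 */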
6219 /* now the addresses */
6221 struct sctp_scoping scp;
6222 /* To optimize this we could put the scoping stuff
6223 * into a structure and remove the individual uint8's from
6224 * the stc structure. Then we could just stuff in the
6225 * address within the stc... but for now this is a quick
6226 * hack to get the address stuff teased apart.
6228 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6229 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6230 #if defined(__Userspace__)
6231 scp.conn_addr_legal = stc.conn_addr_legal;
6233 scp.loopback_scope = stc.loopback_scope;
6234 scp.ipv4_local_scope = stc.ipv4_scope;
6235 scp.local_scope = stc.local_scope;
6236 scp.site_scope = stc.site_scope;
6237 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
6240 /* tack on the operational error if present */
6248 llen += SCTP_BUF_LEN(ol);
6249 ol = SCTP_BUF_NEXT(ol);
6252 /* must add a pad to the param */
6253 uint32_t cpthis = 0;
6256 padlen = 4 - (llen % 4);
6257 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
6259 while (SCTP_BUF_NEXT(m_at) != NULL) {
6260 m_at = SCTP_BUF_NEXT(m_at);
6262 SCTP_BUF_NEXT(m_at) = op_err;
6263 while (SCTP_BUF_NEXT(m_at) != NULL) {
6264 m_at = SCTP_BUF_NEXT(m_at);
6267 /* pre-calculate the size and update pkt header and chunk header */
6269 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6270 p_len += SCTP_BUF_LEN(m_tmp);
6271 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6272 /* m_tmp should now point to last one */
6277 /* Now we must build a cookie */
6278 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6279 if (m_cookie == NULL) {
6280 /* memory problem */
6284 /* Now append the cookie to the end and update the space/size */
6285 SCTP_BUF_NEXT(m_tmp) = m_cookie;
6287 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6288 p_len += SCTP_BUF_LEN(m_tmp);
6289 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6290 /* m_tmp should now point to last one */
6295 /* Place in the size, but we don't include
6296 * the last pad (if any) in the INIT-ACK.
6298 initack->ch.chunk_length = htons(p_len);
6300 /* Time to sign the cookie; we don't sign over the cookie
6301 * signature itself, thus we set trailer.
6303 (void)sctp_hmac_m(SCTP_HMAC,
6304 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6305 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6306 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
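/*
 * Editor's note (added, not in the original source): sctp_hmac_m()
 * computes HMAC(secret_key, cookie) over the mbuf chain starting past
 * the parameter header, and the SCTP_SIGNATURE_SIZE trailer argument
 * excludes the signature field itself from the digest.  A verifier
 * would therefore recompute the same HMAC over the echoed cookie
 * (with the signature bytes excluded the same way) and compare digests.
 */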
6308 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6309 * here since the timer will drive a retransmission.
6312 if ((padval) && (mp_last)) {
6313 /* see my previous comments on mp_last */
6314 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
6315 /* Houston we have a problem, no space */
6320 if (stc.loopback_scope) {
6321 over_addr = (union sctp_sockstore *)dst;
6326 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6328 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6330 #if defined(__FreeBSD__)
6331 use_mflowid, mflowid,
6333 SCTP_SO_NOT_LOCKED);
6334 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6339 sctp_prune_prsctp(struct sctp_tcb *stcb,
6340 struct sctp_association *asoc,
6341 struct sctp_sndrcvinfo *srcv,
6345 struct sctp_tmit_chunk *chk, *nchk;
6347 SCTP_TCB_LOCK_ASSERT(stcb);
6348 if ((asoc->peer_supports_prsctp) &&
6349 (asoc->sent_queue_cnt_removeable > 0)) {
6350 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6352 * Look for chunks marked with the PR_SCTP flag AND
6353 * the buffer space flag. If the one being sent is
6354 * equal or greater priority then purge the old one
6355 * and free some space.
6357 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6359 * This one is PR-SCTP AND buffer space
6362 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6364 * Lower numbers equate to higher
6365 * priority, so if the one we are
6366 * looking at has a larger or equal
6367 * priority we want to drop the data
6368 * and NOT retransmit it.
6372 * We release the book_size
6373 * if the mbuf is here
6378 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6382 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6385 freed_spc += ret_spc;
6386 if (freed_spc >= dataout) {
6389 } /* if chunk was present */
6390 } /* if of sufficient priority */
6391 } /* if chunk has enabled */
6392 } /* tailqforeach */
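/*
 * Editor's example (hypothetical numbers, not from the original source):
 * under the PR-SCTP buffer-space policy, timetodrop.tv_sec stores a
 * priority and lower numbers mean higher priority.  If the new send has
 * sinfo_timetolive == 5, a queued chunk with timetodrop.tv_sec == 7
 * (equal or lower priority) is released to free space, while one with
 * tv_sec == 3 (higher priority) is left alone.
 */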
6394 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6395 /* Here we must move to the sent queue and mark */
6396 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6397 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6400 * We release the book_size
6401 * if the mbuf is here
6405 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6408 freed_spc += ret_spc;
6409 if (freed_spc >= dataout) {
6412 } /* end if chk->data */
6413 } /* end if right class */
6414 } /* end if chk pr-sctp */
6415 } /* tailqforeachsafe (chk) */
6416 } /* if enabled in asoc */
6420 sctp_get_frag_point(struct sctp_tcb *stcb,
6421 struct sctp_association *asoc)
6426 * For endpoints that have both v6 and v4 addresses we must reserve
6427 * room for the ipv6 header, for those that are only dealing with V4
6428 * we use a larger frag point.
6430 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6431 ovh = SCTP_MED_OVERHEAD;
6433 ovh = SCTP_MED_V4_OVERHEAD;
6436 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6437 siz = asoc->smallest_mtu - ovh;
6439 siz = (stcb->asoc.sctp_frag_point - ovh);
6441 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6443 /* A data chunk MUST fit in a cluster */
6444 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6447 /* adjust for an AUTH chunk if DATA requires auth */
6448 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6449 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
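/*
 * Editor's worked example (hypothetical numbers, not from the original
 * source): for a V6-bound endpoint with smallest_mtu == 1500,
 * siz = 1500 - SCTP_MED_OVERHEAD; if DATA must be authenticated,
 * sctp_get_auth_chunk_len() additionally subtracts the AUTH chunk's
 * wire size for the negotiated HMAC, and the word-boundary adjustment
 * below then rounds siz down to a multiple of 4.
 */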
6452 /* make it an even word boundary please */
6459 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6462 * We assume that the user wants PR_SCTP_TTL if the user
6463 * provides a positive lifetime but does not specify any
6466 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6467 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6468 } else if (sp->timetolive > 0) {
6469 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6470 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6474 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6475 case CHUNK_FLAGS_PR_SCTP_BUF:
6477 * Time to live is a priority stored in tv_sec when
6478 * doing the buffer drop thing.
6480 sp->ts.tv_sec = sp->timetolive;
6483 case CHUNK_FLAGS_PR_SCTP_TTL:
6486 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6487 tv.tv_sec = sp->timetolive / 1000;
6488 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
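/*
 * Editor's arithmetic note (added, not in the original source):
 * timetolive is in milliseconds, so sp->timetolive == 1500 gives
 *
 *     tv.tv_sec  = 1500 / 1000           = 1
 *     tv.tv_usec = (1500 * 1000) % 10^6  = 500000
 *
 * i.e. 1.5 seconds is added to the current time to form the deadline.
 */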
6489 /* TODO sctp_constants.h needs alternative time macros when
6490 * _KERNEL is undefined.
6493 timeradd(&sp->ts, &tv, &sp->ts);
6495 timevaladd(&sp->ts, &tv);
6499 case CHUNK_FLAGS_PR_SCTP_RTX:
6501 * Time to live is the number of retransmissions
6504 sp->ts.tv_sec = sp->timetolive;
6508 SCTPDBG(SCTP_DEBUG_USRREQ1,
6509 "Unknown PR_SCTP policy %u.\n",
6510 PR_SCTP_POLICY(sp->sinfo_flags));
6516 sctp_msg_append(struct sctp_tcb *stcb,
6517 struct sctp_nets *net,
6519 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6523 struct sctp_stream_queue_pending *sp = NULL;
6524 struct sctp_stream_out *strm;
6526 /* Given an mbuf chain, put it
6527 * into the association send queue and
6528 * place it on the wheel
6530 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6531 /* Invalid stream number */
6532 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6536 if ((stcb->asoc.stream_locked) &&
6537 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6538 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6542 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6543 /* Now can we send this? */
6544 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6545 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6546 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6547 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6548 /* got data while shutting down */
6549 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6553 sctp_alloc_a_strmoq(stcb, sp);
6555 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6559 sp->sinfo_flags = srcv->sinfo_flags;
6560 sp->timetolive = srcv->sinfo_timetolive;
6561 sp->ppid = srcv->sinfo_ppid;
6562 sp->context = srcv->sinfo_context;
6563 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6565 atomic_add_int(&sp->net->ref_count, 1);
6569 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6570 sp->stream = srcv->sinfo_stream;
6571 sp->msg_is_complete = 1;
6572 sp->sender_all_done = 1;
6575 sp->tail_mbuf = NULL;
6576 sctp_set_prsctp_policy(sp);
6577 /* We could in theory (for sendall) stuff the length
6578 * in, but we would still have to hunt through the
6579 * chain since we need to set up the tail_mbuf
6582 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6583 if (SCTP_BUF_NEXT(at) == NULL)
6585 sp->length += SCTP_BUF_LEN(at);
6587 if (srcv->sinfo_keynumber_valid) {
6588 sp->auth_keyid = srcv->sinfo_keynumber;
6590 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6592 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6593 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6594 sp->holds_key_ref = 1;
6596 if (hold_stcb_lock == 0) {
6597 SCTP_TCB_SEND_LOCK(stcb);
6599 sctp_snd_sb_alloc(stcb, sp->length);
6600 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6601 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6602 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6604 if (hold_stcb_lock == 0) {
6605 SCTP_TCB_SEND_UNLOCK(stcb);
6615 static struct mbuf *
6616 sctp_copy_mbufchain(struct mbuf *clonechain,
6617 struct mbuf *outchain,
6618 struct mbuf **endofchain,
6621 uint8_t copy_by_ref)
6624 struct mbuf *appendchain;
6628 if (endofchain == NULL) {
6632 sctp_m_freem(outchain);
6635 if (can_take_mbuf) {
6636 appendchain = clonechain;
6639 #if defined(__Panda__)
6642 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6645 /* It's not in a cluster */
6646 if (*endofchain == NULL) {
6647 /* lets get a mbuf cluster */
6648 if (outchain == NULL) {
6649 /* This is the general case */
6651 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6652 if (outchain == NULL) {
6655 SCTP_BUF_LEN(outchain) = 0;
6656 *endofchain = outchain;
6657 /* get the prepend space */
6658 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
6660 /* We really should not get a NULL in endofchain */
6664 if (SCTP_BUF_NEXT(m) == NULL) {
6668 m = SCTP_BUF_NEXT(m);
6671 if (*endofchain == NULL) {
6672 /* huh, TSNH XXX maybe we should panic */
6673 sctp_m_freem(outchain);
6677 /* get the new end of length */
6678 len = M_TRAILINGSPACE(*endofchain);
6680 /* how much is left at the end? */
6681 len = M_TRAILINGSPACE(*endofchain);
6683 /* Find the end of the data, for appending */
6684 cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
6686 /* Now lets copy it out */
6687 if (len >= sizeofcpy) {
6688 /* It all fits, copy it in */
6689 m_copydata(clonechain, 0, sizeofcpy, cp);
6690 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6692 /* fill up the end of the chain */
6694 m_copydata(clonechain, 0, len, cp);
6695 SCTP_BUF_LEN((*endofchain)) += len;
6696 /* now we need another one */
6699 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6704 SCTP_BUF_NEXT((*endofchain)) = m;
6706 cp = mtod((*endofchain), caddr_t);
6707 m_copydata(clonechain, len, sizeofcpy, cp);
6708 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6712 /* copy the old-fashioned way */
6713 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6714 #ifdef SCTP_MBUF_LOGGING
6715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6718 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6719 if (SCTP_BUF_IS_EXTENDED(mat)) {
6720 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6727 if (appendchain == NULL) {
6730 sctp_m_freem(outchain);
6734 /* tack on to the end */
6735 if (*endofchain != NULL) {
6736 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6740 if (SCTP_BUF_NEXT(m) == NULL) {
6741 SCTP_BUF_NEXT(m) = appendchain;
6744 m = SCTP_BUF_NEXT(m);
6748 * save off the end and update the end-chain
6753 if (SCTP_BUF_NEXT(m) == NULL) {
6757 m = SCTP_BUF_NEXT(m);
6761 /* save off the end and update the end-chain position */
6764 if (SCTP_BUF_NEXT(m) == NULL) {
6768 m = SCTP_BUF_NEXT(m);
6770 return (appendchain);
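/*
 * Editor's note on the routine above (added, not in the original
 * source): sctp_copy_mbufchain() picks one of three strategies -- steal
 * the chain outright when can_take_mbuf is set, flat-copy a small
 * payload into the trailing space of *endofchain via m_copydata() when
 * it is below the mbuf-threshold heuristic, or fall back to
 * SCTP_M_COPYM(), a copy-by-reference clone.  E.g. a 200-byte chunk
 * headed at a cluster with 300 bytes of trailing space is simply
 * copied in and SCTP_BUF_LEN() grows by 200.
 */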
6775 sctp_med_chunk_output(struct sctp_inpcb *inp,
6776 struct sctp_tcb *stcb,
6777 struct sctp_association *asoc,
6780 int control_only, int from_where,
6781 struct timeval *now, int *now_filled, int frag_point, int so_locked
6782 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6788 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6789 uint32_t val SCTP_UNUSED)
6791 struct sctp_copy_all *ca;
6794 int added_control = 0;
6795 int un_sent, do_chunk_output = 1;
6796 struct sctp_association *asoc;
6797 struct sctp_nets *net;
6799 ca = (struct sctp_copy_all *)ptr;
6800 if (ca->m == NULL) {
6803 if (ca->inp != inp) {
6807 if (ca->sndlen > 0) {
6808 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6810 /* can't copy so we are done */
6814 #ifdef SCTP_MBUF_LOGGING
6815 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6818 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6819 if (SCTP_BUF_IS_EXTENDED(mat)) {
6820 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6828 SCTP_TCB_LOCK_ASSERT(stcb);
6829 if (stcb->asoc.alternate) {
6830 net = stcb->asoc.alternate;
6832 net = stcb->asoc.primary_destination;
6834 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6835 /* Abort this assoc with m as the user defined reason */
6837 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6839 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6840 0, M_NOWAIT, 1, MT_DATA);
6841 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6844 struct sctp_paramhdr *ph;
6846 ph = mtod(m, struct sctp_paramhdr *);
6847 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6848 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
6850 /* We add one here to keep the assoc from
6851 * disappearing on us.
6853 atomic_add_int(&stcb->asoc.refcnt, 1);
6854 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6855 /* sctp_abort_an_association calls sctp_free_asoc(),
6856 * but free_asoc will NOT free it since we
6857 * incremented the refcnt; we do this to prevent
6858 * it being freed and things getting tricky, since
6859 * we could end up (from free_asoc) calling inpcb_free,
6860 * which would get a recursive lock call to the
6861 * iterator lock. But as a consequence of that the
6862 * stcb will return to us unlocked; since free_asoc
6863 * returns with either no TCB or the TCB unlocked, we
6864 * must relock to unlock in the iterator timer :-0
6866 SCTP_TCB_LOCK(stcb);
6867 atomic_add_int(&stcb->asoc.refcnt, -1);
6868 goto no_chunk_output;
6871 ret = sctp_msg_append(stcb, net, m,
6875 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6876 /* shutdown this assoc */
6878 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6880 if (TAILQ_EMPTY(&asoc->send_queue) &&
6881 TAILQ_EMPTY(&asoc->sent_queue) &&
6883 if (asoc->locked_on_sending) {
6886 /* there is nothing queued to send, so I'm done... */
6887 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6888 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6889 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6890 /* only send SHUTDOWN the first time through */
6891 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6892 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6894 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6895 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6896 sctp_stop_timers_for_shutdown(stcb);
6897 sctp_send_shutdown(stcb, net);
6898 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6900 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6901 asoc->primary_destination);
6903 do_chunk_output = 0;
6907 * we still got (or just got) data to send, so set
6911 * XXX sockets draft says that SCTP_EOF should be
6912 * sent with no data. currently, we will allow user
6913 * data to be sent first and move to
6916 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6917 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6918 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6919 if (asoc->locked_on_sending) {
6920 /* Locked to send out the data */
6921 struct sctp_stream_queue_pending *sp;
6922 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6924 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6925 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6928 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6929 if (TAILQ_EMPTY(&asoc->send_queue) &&
6930 TAILQ_EMPTY(&asoc->sent_queue) &&
6931 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6933 atomic_add_int(&stcb->asoc.refcnt, 1);
6934 sctp_abort_an_association(stcb->sctp_ep, stcb,
6935 NULL, SCTP_SO_NOT_LOCKED);
6936 atomic_add_int(&stcb->asoc.refcnt, -1);
6937 goto no_chunk_output;
6939 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6940 asoc->primary_destination);
6946 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6947 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6949 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6950 (stcb->asoc.total_flight > 0) &&
6951 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6952 do_chunk_output = 0;
6954 if (do_chunk_output)
6955 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6956 else if (added_control) {
6957 int num_out = 0, reason = 0, now_filled = 0;
6960 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6961 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6962 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6973 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6975 struct sctp_copy_all *ca;
6977 ca = (struct sctp_copy_all *)ptr;
6979 * Do a notify here? Kacheong suggests that the notify be done at
6980 * the send time... so you would push up a notification if any send
6981 * failed. Don't know if this is feasible since the only failures we
6982 * have are "memory" related, and if you cannot get an mbuf to send
6983 * the data you surely can't get an mbuf to send up to notify the
6984 * user you can't send the data :->
6987 /* now free everything */
6988 sctp_m_freem(ca->m);
6989 SCTP_FREE(ca, SCTP_M_COPYAL);
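/*
 * Editor's note on the MC_ALIGN() macro just below (example values are
 * hypothetical, not from the original source): it reserves leading
 * space so len bytes land at the tail of a cluster, aligned down to a
 * long boundary.  On LP64 with MCLBYTES == 2048 and len == 100:
 *
 *     (2048 - 100) & ~(sizeof(long) - 1)  ->  1948 & ~7  ->  1944
 *
 * so the data begins 1944 bytes in and stays 8-byte aligned.
 */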
6993 #define MC_ALIGN(m, len) do { \
6994 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6999 static struct mbuf *
7000 sctp_copy_out_all(struct uio *uio, int len)
7002 struct mbuf *ret, *at;
7003 int left, willcpy, cancpy, error;
7005 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7011 SCTP_BUF_LEN(ret) = 0;
7012 /* save space for the data chunk header */
7013 cancpy = M_TRAILINGSPACE(ret);
7014 willcpy = min(cancpy, left);
7017 /* Align data to the end */
7018 error = uiomove(mtod(at, caddr_t), willcpy, uio);
7024 SCTP_BUF_LEN(at) = willcpy;
7025 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7028 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
7029 if (SCTP_BUF_NEXT(at) == NULL) {
7032 at = SCTP_BUF_NEXT(at);
7033 SCTP_BUF_LEN(at) = 0;
7034 cancpy = M_TRAILINGSPACE(at);
7035 willcpy = min(cancpy, left);
7042 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7043 struct sctp_sndrcvinfo *srcv)
7046 struct sctp_copy_all *ca;
7048 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7052 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7055 memset(ca, 0, sizeof(struct sctp_copy_all));
7059 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7062 * take off the sendall flag, it would be bad if we failed to do
7065 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7066 /* get length and mbuf chain */
7068 #if defined(__APPLE__)
7069 #if defined(APPLE_LEOPARD)
7070 ca->sndlen = uio->uio_resid;
7072 ca->sndlen = uio_resid(uio);
7075 ca->sndlen = uio->uio_resid;
7077 #if defined(__APPLE__)
7078 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7080 ca->m = sctp_copy_out_all(uio, ca->sndlen);
7081 #if defined(__APPLE__)
7082 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7084 if (ca->m == NULL) {
7085 SCTP_FREE(ca, SCTP_M_COPYAL);
7086 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7090 /* Gather the length of the send */
7094 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7095 ca->sndlen += SCTP_BUF_LEN(mat);
7098 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7099 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7100 SCTP_ASOC_ANY_STATE,
7102 sctp_sendall_completes, inp, 1);
7104 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
7105 SCTP_FREE(ca, SCTP_M_COPYAL);
7106 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7114 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7116 struct sctp_tmit_chunk *chk, *nchk;
7118 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7119 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7120 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7122 sctp_m_freem(chk->data);
7125 asoc->ctrl_queue_cnt--;
7126 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7132 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7134 struct sctp_association *asoc;
7135 struct sctp_tmit_chunk *chk, *nchk;
7136 struct sctp_asconf_chunk *acp;
7139 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7140 /* find SCTP_ASCONF chunk in queue */
7141 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7143 acp = mtod(chk->data, struct sctp_asconf_chunk *);
7144 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7149 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7151 sctp_m_freem(chk->data);
7154 asoc->ctrl_queue_cnt--;
7155 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7162 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7163 struct sctp_association *asoc,
7164 struct sctp_tmit_chunk **data_list,
7166 struct sctp_nets *net)
7169 struct sctp_tmit_chunk *tp1;
7171 for (i = 0; i < bundle_at; i++) {
7172 /* off of the send queue */
7173 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7174 asoc->send_queue_cnt--;
7177 * Any chunk NOT 0 you zap the time; chunk 0 gets
7178 * zapped or set based on whether an RTO measurement is
7181 data_list[i]->do_rtt = 0;
7184 data_list[i]->sent_rcv_time = net->last_sent_time;
7185 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7186 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
7187 if (data_list[i]->whoTo == NULL) {
7188 data_list[i]->whoTo = net;
7189 atomic_add_int(&net->ref_count, 1);
7191 /* on to the sent queue */
7192 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7193 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7194 struct sctp_tmit_chunk *tpp;
7196 /* need to move back */
7198 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7200 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7204 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7207 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7209 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7214 /* This does not lower until the cum-ack passes it */
7215 asoc->sent_queue_cnt++;
7216 if ((asoc->peers_rwnd <= 0) &&
7217 (asoc->total_flight == 0) &&
7219 /* Mark the chunk as being a window probe */
7220 SCTP_STAT_INCR(sctps_windowprobed);
7222 #ifdef SCTP_AUDITING_ENABLED
7223 sctp_audit_log(0xC2, 3);
7225 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7226 data_list[i]->snd_count = 1;
7227 data_list[i]->rec.data.chunk_was_revoked = 0;
7228 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7229 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7230 data_list[i]->whoTo->flight_size,
7231 data_list[i]->book_size,
7232 (uintptr_t)data_list[i]->whoTo,
7233 data_list[i]->rec.data.TSN_seq);
7235 sctp_flight_size_increase(data_list[i]);
7236 sctp_total_flight_increase(stcb, data_list[i]);
7237 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7238 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7239 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7241 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7242 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7243 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7244 /* SWS sender side engages */
7245 asoc->peers_rwnd = 0;
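/*
 * Editor's note (added, not in the original source): this is sender-side
 * silly-window-syndrome avoidance.  If, say, the peer advertises a 900
 * byte window while sctp_sws_sender is 1420, we clamp peers_rwnd to 0
 * and hold off rather than dribble out tiny packets; a later SACK with
 * a larger window reopens sending.
 */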
7248 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7249 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7254 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7255 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7260 struct sctp_tmit_chunk *chk, *nchk;
7262 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7263 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7264 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7265 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7266 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7267 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7268 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7269 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7270 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7271 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7272 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7273 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7274 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7275 /* Stray chunks must be cleaned up */
7277 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7279 sctp_m_freem(chk->data);
7282 asoc->ctrl_queue_cnt--;
7283 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7284 asoc->fwd_tsn_cnt--;
7285 sctp_free_a_chunk(stcb, chk, so_locked);
7286 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7287 /* special handling, we must look into the param */
7288 if (chk != asoc->str_reset) {
7289 goto clean_up_anyway;
7297 sctp_can_we_split_this(struct sctp_tcb *stcb,
7299 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7301 /* Make a decision on whether I should split a
7302 * msg into multiple parts. This is only asked of
7303 * incomplete messages.
7306 /* If we are doing EEOR we need to always send
7307 * it if it's the entire thing, since it might
7308 * be all the guy is putting in the hopper.
7310 if (goal_mtu >= length) {
7312 * If we have data outstanding,
7313 * we get another chance when the sack
7314 * arrives to transmit - wait for more data
7316 if (stcb->asoc.total_flight == 0) {
7317 /* If nothing is in flight, we zero
7318 * the packet counter.
7325 /* You can fill the rest */
7330 * For those strange folk that make the send buffer
7331 * smaller than our fragmentation point, we can't
7332 * get a full msg in so we have to allow splitting.
7334 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7338 if ((length <= goal_mtu) ||
7339 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7340 /* Sub-optimal residual, don't split in non-eeor mode. */
7343 /* If we reach here length is larger
7344 * than the goal_mtu. Do we wish to split
7345 * it for the sake of packet putting together?
7347 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7348 /* Its ok to split it */
7349 return (min(goal_mtu, frag_point));
7351 /* Nope, can't split */
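/*
 * Editor's worked example for the splitting decision above (hypothetical
 * numbers, not from the original source): a 3000-byte incomplete message
 * with goal_mtu == 1200 and frag_point == 1400 passes the
 * min(sctp_min_split_point, frag_point) test, so min(1200, 1400) == 1200
 * bytes are moved.  A 1210-byte message leaves only a 10-byte residual;
 * assuming that is below sctp_min_residual, it is not split in
 * non-eeor mode.
 */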
7357 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7358 struct sctp_stream_out *strq,
7360 uint32_t frag_point,
7366 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7371 /* Move from the stream to the send_queue keeping track of the total */
7372 struct sctp_association *asoc;
7373 struct sctp_stream_queue_pending *sp;
7374 struct sctp_tmit_chunk *chk;
7375 struct sctp_data_chunk *dchkh;
7376 uint32_t to_move, length;
7377 uint8_t rcv_flags = 0;
7379 uint8_t send_lock_up = 0;
7381 SCTP_TCB_LOCK_ASSERT(stcb);
7384 /*sa_ignore FREED_MEMORY*/
7385 sp = TAILQ_FIRST(&strq->outqueue);
7388 if (send_lock_up == 0) {
7389 SCTP_TCB_SEND_LOCK(stcb);
7392 sp = TAILQ_FIRST(&strq->outqueue);
7396 if (strq->last_msg_incomplete) {
7397 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7399 strq->last_msg_incomplete);
7400 strq->last_msg_incomplete = 0;
7404 SCTP_TCB_SEND_UNLOCK(stcb);
7409 if ((sp->msg_is_complete) && (sp->length == 0)) {
7410 if (sp->sender_all_done) {
7411 /* We are doing deferred cleanup. Last
7412 * time through, when we took all the data,
7413 * sender_all_done was not set.
7415 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7416 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7417 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7418 sp->sender_all_done,
7420 sp->msg_is_complete,
7424 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7425 SCTP_TCB_SEND_LOCK(stcb);
7428 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7429 TAILQ_REMOVE(&strq->outqueue, sp, next);
7430 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7432 sctp_free_remote_addr(sp->net);
7436 sctp_m_freem(sp->data);
7439 sctp_free_a_strmoq(stcb, sp, so_locked);
7440 /* we can't be locked to it */
7442 stcb->asoc.locked_on_sending = NULL;
7444 SCTP_TCB_SEND_UNLOCK(stcb);
7447 /* back to get the next msg */
7450 /* sender just finished this but
7451 * still holds a reference
7459 /* is there some to get */
7460 if (sp->length == 0) {
7466 } else if (sp->discard_rest) {
7467 if (send_lock_up == 0) {
7468 SCTP_TCB_SEND_LOCK(stcb);
7471 /* Whack down the size */
7472 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7473 if ((stcb->sctp_socket != NULL) &&
7474 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7475 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7476 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7479 sctp_m_freem(sp->data);
7481 sp->tail_mbuf = NULL;
7491 some_taken = sp->some_taken;
7492 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7493 sp->msg_is_complete = 1;
7496 length = sp->length;
7497 if (sp->msg_is_complete) {
7498 /* The message is complete */
7499 to_move = min(length, frag_point);
7500 if (to_move == length) {
7501 /* All of it fits in the MTU */
7502 if (sp->some_taken) {
7503 rcv_flags |= SCTP_DATA_LAST_FRAG;
7504 sp->put_last_out = 1;
7506 rcv_flags |= SCTP_DATA_NOT_FRAG;
7507 sp->put_last_out = 1;
7510 /* Not all of it fits, we fragment */
7511 if (sp->some_taken == 0) {
7512 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7517 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7520 * We use a snapshot of length in case it
7521 * is expanding during the compare.
7526 if (to_move >= llen) {
7528 if (send_lock_up == 0) {
7530 * We are taking all of an incomplete msg
7531 * thus we need a send lock.
7533 SCTP_TCB_SEND_LOCK(stcb);
7535 if (sp->msg_is_complete) {
7536 /* the sender finished the msg */
7541 if (sp->some_taken == 0) {
7542 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7546 /* Nothing to take. */
7547 if (sp->some_taken) {
7556 /* If we reach here, we can copy out a chunk */
7557 sctp_alloc_a_chunk(stcb, chk);
7559 /* No chunk memory */
7564 /* Setup for unordered if needed by looking
7565 * at the user sent info flags.
7567 if (sp->sinfo_flags & SCTP_UNORDERED) {
7568 rcv_flags |= SCTP_DATA_UNORDERED;
7570 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7571 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7572 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7574 /* clear out the chunk before setting up */
7575 memset(chk, 0, sizeof(*chk));
7576 chk->rec.data.rcv_flags = rcv_flags;
7578 if (to_move >= length) {
7579 /* we think we can steal the whole thing */
7580 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7581 SCTP_TCB_SEND_LOCK(stcb);
7584 if (to_move < sp->length) {
7585 /* bail, it changed */
7588 chk->data = sp->data;
7589 chk->last_mbuf = sp->tail_mbuf;
7590 /* register the stealing */
7591 sp->data = sp->tail_mbuf = NULL;
7595 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7596 chk->last_mbuf = NULL;
7597 if (chk->data == NULL) {
7598 sp->some_taken = some_taken;
7599 sctp_free_a_chunk(stcb, chk, so_locked);
7604 #ifdef SCTP_MBUF_LOGGING
7605 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7608 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7609 if (SCTP_BUF_IS_EXTENDED(mat)) {
7610 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7615 /* Pull off the data */
7616 m_adj(sp->data, to_move);
7617 /* Now lets work our way down and compact it */
7619 while (m && (SCTP_BUF_LEN(m) == 0)) {
7620 sp->data = SCTP_BUF_NEXT(m);
7621 SCTP_BUF_NEXT(m) = NULL;
7622 if (sp->tail_mbuf == m) {
7624 * Freeing tail? TSNH since
7625 * we supposedly were taking less
7626 * than the sp->length.
7629 panic("Huh, freing tail? - TSNH");
7631 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7632 sp->tail_mbuf = sp->data = NULL;
7641 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7642 chk->copy_by_ref = 1;
7644 chk->copy_by_ref = 0;
7646 /* get last_mbuf and counts of mb usage.
7647 * This is ugly but hopefully it's only one mbuf.
7649 if (chk->last_mbuf == NULL) {
7650 chk->last_mbuf = chk->data;
7651 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7652 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7656 if (to_move > length) {
7657 /*- This should not happen either
7658 * since we always lower to_move to the size
7659 * of sp->length if it's larger.
7662 panic("Huh, how can to_move be larger?");
7664 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7668 atomic_subtract_int(&sp->length, to_move);
7670 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7671 /* Not enough room for a chunk header, get some */
7673 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7676 * we're in trouble here. _PREPEND below will free
7677 * all the data if there is no leading space, so we
7678 * must put the data back and restore.
7680 if (send_lock_up == 0) {
7681 SCTP_TCB_SEND_LOCK(stcb);
7684 if (chk->data == NULL) {
7685 /* unsteal the data */
7686 sp->data = chk->data;
7687 sp->tail_mbuf = chk->last_mbuf;
7690 /* reassemble the data */
7692 sp->data = chk->data;
7693 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7695 sp->some_taken = some_taken;
7696 atomic_add_int(&sp->length, to_move);
7699 sctp_free_a_chunk(stcb, chk, so_locked);
7703 SCTP_BUF_LEN(m) = 0;
7704 SCTP_BUF_NEXT(m) = chk->data;
7706 M_ALIGN(chk->data, 4);
7709 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7710 if (chk->data == NULL) {
7711 /* HELP, TSNH since we assured it would not above? */
7713 panic("prepend failes HELP?");
7715 SCTP_PRINTF("prepend fails HELP?\n");
7716 sctp_free_a_chunk(stcb, chk, so_locked);
7722 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7723 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7724 chk->book_size_scale = 0;
7725 chk->sent = SCTP_DATAGRAM_UNSENT;
7728 chk->asoc = &stcb->asoc;
7729 chk->pad_inplace = 0;
7730 chk->no_fr_allowed = 0;
7731 chk->rec.data.stream_seq = strq->next_sequence_send;
7732 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7733 strq->next_sequence_send++;
7735 chk->rec.data.stream_number = sp->stream;
7736 chk->rec.data.payloadtype = sp->ppid;
7737 chk->rec.data.context = sp->context;
7738 chk->rec.data.doing_fast_retransmit = 0;
7740 chk->rec.data.timetodrop = sp->ts;
7741 chk->flags = sp->act_flags;
7744 chk->whoTo = sp->net;
7745 atomic_add_int(&chk->whoTo->ref_count, 1);
7749 if (sp->holds_key_ref) {
7750 chk->auth_keyid = sp->auth_keyid;
7751 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7752 chk->holds_key_ref = 1;
7755 #if defined(__FreeBSD__) || defined(__Panda__)
7756 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7758 chk->rec.data.TSN_seq = asoc->sending_seq++;
7760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7761 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7762 (uintptr_t)stcb, sp->length,
7763 (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7764 chk->rec.data.TSN_seq);
7766 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7768 * Put the rest of the things in place now. Size was done
7769 * earlier in previous loop prior to padding.
7772 #ifdef SCTP_ASOCLOG_OF_TSNS
7773 SCTP_TCB_LOCK_ASSERT(stcb);
7774 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7775 asoc->tsn_out_at = 0;
7776 asoc->tsn_out_wrapped = 1;
7778 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7779 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7780 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7781 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7782 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7783 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7784 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7785 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7789 dchkh->ch.chunk_type = SCTP_DATA;
7790 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7791 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7792 dchkh->dp.stream_id = htons(strq->stream_no);
7793 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7794 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7795 dchkh->ch.chunk_length = htons(chk->send_size);
7796 /* Now advance the chk->send_size by the actual pad needed. */
7797 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7802 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7803 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7804 chk->pad_inplace = 1;
7806 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7807 /* pad added an mbuf */
7808 chk->last_mbuf = lm;
7810 chk->send_size += pads;
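/*
 * Editor's padding example (illustrative, not from the original source):
 * a 19-byte user message gives book_size == 19 + 16 == 35 (the 16-byte
 * data chunk header included), SCTP_SIZE32(35) == 36, hence pads == 1.
 * The pad byte goes in place when the last mbuf has room (pad_inplace),
 * otherwise sctp_pad_lastmbuf() chains a fresh mbuf and last_mbuf is
 * advanced to it.
 */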
7812 if (PR_SCTP_ENABLED(chk->flags)) {
7813 asoc->pr_sctp_cnt++;
7815 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7816 /* All done, pull and kill the message */
7817 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7818 if (sp->put_last_out == 0) {
7819 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7820 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7821 sp->sender_all_done,
7823 sp->msg_is_complete,
7827 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7828 SCTP_TCB_SEND_LOCK(stcb);
7831 TAILQ_REMOVE(&strq->outqueue, sp, next);
7832 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7834 sctp_free_remote_addr(sp->net);
7838 sctp_m_freem(sp->data);
7841 sctp_free_a_strmoq(stcb, sp, so_locked);
7843 /* we can't be locked to it */
7845 stcb->asoc.locked_on_sending = NULL;
7847 /* more to go, we are locked */
7850 asoc->chunks_on_out_queue++;
7851 strq->chunks_on_queues++;
7852 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7853 asoc->send_queue_cnt++;
7856 SCTP_TCB_SEND_UNLOCK(stcb);
7863 sctp_fill_outqueue(struct sctp_tcb *stcb,
7864 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7865 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7870 struct sctp_association *asoc;
7871 struct sctp_stream_out *strq;
7872 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7875 SCTP_TCB_LOCK_ASSERT(stcb);
7877 switch (net->ro._l_addr.sa.sa_family) {
7880 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7885 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7888 #if defined(__Userspace__)
7890 goal_mtu = net->mtu - sizeof(struct sctphdr);
7895 goal_mtu = net->mtu;
7898 /* Need an allowance for the data chunk header too */
7899 goal_mtu -= sizeof(struct sctp_data_chunk);
7901 /* must make even word boundary */
7902 goal_mtu &= 0xfffffffc;
7903 if (asoc->locked_on_sending) {
7904 /* We are stuck on one stream until the message completes. */
7905 strq = asoc->locked_on_sending;
7908 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7911 while ((goal_mtu > 0) && strq) {
7914 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7915 &giveup, eeor_mode, &bail, so_locked);
7917 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7920 asoc->locked_on_sending = strq;
7921 if ((moved_how_much == 0) || (giveup) || bail)
7922 /* no more to move for now */
7925 asoc->locked_on_sending = NULL;
7926 if ((giveup) || bail) {
7929 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7934 total_moved += moved_how_much;
7935 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7936 goal_mtu &= 0xfffffffc;
7941 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7943 if (total_moved == 0) {
7944 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7945 (net == stcb->asoc.primary_destination)) {
7946 /* ran dry for primary network net */
7947 SCTP_STAT_INCR(sctps_primary_randry);
7948 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7949 /* ran dry with CMT on */
7950 SCTP_STAT_INCR(sctps_cmt_randry);
7956 sctp_fix_ecn_echo(struct sctp_association *asoc)
7958 struct sctp_tmit_chunk *chk;
7960 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7961 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7962 chk->sent = SCTP_DATAGRAM_UNSENT;
7968 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7970 struct sctp_association *asoc;
7971 struct sctp_tmit_chunk *chk;
7972 struct sctp_stream_queue_pending *sp;
7979 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7980 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7981 if (sp->net == net) {
7982 sctp_free_remote_addr(sp->net);
7987 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7988 if (chk->whoTo == net) {
7989 sctp_free_remote_addr(chk->whoTo);
7996 sctp_med_chunk_output(struct sctp_inpcb *inp,
7997 struct sctp_tcb *stcb,
7998 struct sctp_association *asoc,
8001 int control_only, int from_where,
8002 struct timeval *now, int *now_filled, int frag_point, int so_locked
8003 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8009 * Ok this is the generic chunk service queue. We must do the
8010 * following: - Service the stream queue that is next, moving any
8011 * message (note I must get a complete message, i.e. FIRST/MIDDLE and
8012 * LAST, to the out queue in one pass) and assigning TSN's - Check to
8013 * see if the cwnd/rwnd allows any output; if so, we go ahead and
8014 * formulate and send the low level chunks, making sure to combine
8015 * any control in the control chunk queue also.
8017 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8018 struct mbuf *outchain, *endoutchain;
8019 struct sctp_tmit_chunk *chk, *nchk;
8021 /* temp arrays for unlinking */
8022 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8023 int no_fragmentflg, error;
8024 unsigned int max_rwnd_per_dest, max_send_per_dest;
8025 int one_chunk, hbflag, skip_data_for_this_net;
8026 int asconf, cookie, no_out_cnt;
8027 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8028 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8030 uint32_t auth_offset = 0;
8031 struct sctp_auth_chunk *auth = NULL;
8032 uint16_t auth_keyid;
8033 int override_ok = 1;
8034 int skip_fill_up = 0;
8035 int data_auth_reqd = 0;
8036 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8040 #if defined(__APPLE__)
8042 sctp_lock_assert(SCTP_INP_SO(inp));
8044 sctp_unlock_assert(SCTP_INP_SO(inp));
8048 auth_keyid = stcb->asoc.authinfo.active_keyid;
8050 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8051 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
8052 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8057 ctl_cnt = no_out_cnt = asconf = cookie = 0;
8059 * First lets prime the pump. For each destination, if there is room
8060 * in the flight size, attempt to pull an MTU's worth out of the
8061 * stream queues into the general send_queue
8063 #ifdef SCTP_AUDITING_ENABLED
8064 sctp_audit_log(0xC2, 2);
8066 SCTP_TCB_LOCK_ASSERT(stcb);
8068 if ((control_only) || (asoc->stream_reset_outstanding))
8073 /* Nothing possible to send? */
8074 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8075 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8076 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8077 TAILQ_EMPTY(&asoc->send_queue) &&
8078 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
8083 if (asoc->peers_rwnd == 0) {
8084 /* No room in peers rwnd */
8086 if (asoc->total_flight > 0) {
8087 /* we are allowed one chunk in flight */
8091 if (stcb->asoc.ecn_echo_cnt_onq) {
8092 /* Record where a sack goes, if any */
8093 if (no_data_chunks &&
8094 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8095 /* Nothing but ECNe to send - we don't do that */
8096 goto nothing_to_send;
8098 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8099 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8100 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8101 sack_goes_to = chk->whoTo;
8106 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8107 if (stcb->sctp_socket)
8108 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8110 max_send_per_dest = 0;
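/*
 * Editor's fair-share example (hypothetical numbers, not from the
 * original source): with peers_rwnd == 60000, total_flight == 20000 and
 * numnets == 4, max_rwnd_per_dest == (60000 + 20000) / 4 == 20000, so
 * each destination may claim an equal slice of the effective window
 * when data is being apportioned to it.
 */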
8111 if (no_data_chunks == 0) {
8112 /* How many non-directed chunks are there? */
8113 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8114 if (chk->whoTo == NULL) {
8115 /* We already have non-directed
8116 * chunks on the queue, no need
8125 if ((no_data_chunks == 0) &&
8126 (skip_fill_up == 0) &&
8127 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8128 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8130 * This for loop we are in takes in
8131 * each net; if it's got space in cwnd and
8132 * has data sent to it (when CMT is off), then it
8133 * calls sctp_fill_outqueue for the net. This gets
8134 * data on the send queue for that network.
8136 * In sctp_fill_outqueue TSN's are assigned and
8137 * data is copied out of the stream buffers. Note
8138 * mostly copy by reference (we hope).
8140 net->window_probe = 0;
8141 if ((net != stcb->asoc.alternate) &&
8142 ((net->dest_state & SCTP_ADDR_PF) ||
8143 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8144 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8146 sctp_log_cwnd(stcb, net, 1,
8147 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8151 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8152 (net->flight_size == 0)) {
8153 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8155 if (net->flight_size >= net->cwnd) {
8156 /* skip this network, no room - can't fill */
8157 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8158 sctp_log_cwnd(stcb, net, 3,
8159 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8163 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8164 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8166 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8168 /* memory alloc failure */
8174 /* now service each destination and send out what we can for it */
8175 /* Nothing to send? */
8176 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8177 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8178 TAILQ_EMPTY(&asoc->send_queue)) {
8183 if (asoc->sctp_cmt_on_off > 0) {
8184 /* get the last start point */
8185 start_at = asoc->last_net_cmt_send_started;
8186 if (start_at == NULL) {
8187 /* NULL, so start at the beginning */
8188 start_at = TAILQ_FIRST(&asoc->nets);
8190 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8191 if (start_at == NULL) {
8192 start_at = TAILQ_FIRST(&asoc->nets);
8195 asoc->last_net_cmt_send_started = start_at;
8197 start_at = TAILQ_FIRST(&asoc->nets);
8199 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8200 if (chk->whoTo == NULL) {
8201 if (asoc->alternate) {
8202 chk->whoTo = asoc->alternate;
8204 chk->whoTo = asoc->primary_destination;
8206 atomic_add_int(&chk->whoTo->ref_count, 1);
8209 old_start_at = NULL;
8210 again_one_more_time:
8211 for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8212 /* how much can we send? */
8213 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8214 if (old_start_at && (old_start_at == net)) {
8215 /* been through the list completely. */
8219 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8220 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8221 (net->flight_size >= net->cwnd)) {
8222 /* Nothing on control or asconf and flight is full, we can skip
8223 * even in the CMT case.
8228 endoutchain = outchain = NULL;
8231 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8232 skip_data_for_this_net = 1;
8234 skip_data_for_this_net = 0;
8236 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) || defined(__APPLE__))
8237 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
8239 * if we have a route and an ifp, check to see if we
8240 * have room to send to this guy
8244 ifp = net->ro.ro_rt->rt_ifp;
8245 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
8246 SCTP_STAT_INCR(sctps_ifnomemqueued);
8247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
8248 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
8254 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8257 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8262 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8265 #if defined(__Userspace__)
8267 mtu = net->mtu - sizeof(struct sctphdr);
8277 if (mtu > asoc->peers_rwnd) {
8278 if (asoc->total_flight > 0) {
8279 /* We have a packet in flight somewhere */
8280 r_mtu = asoc->peers_rwnd;
8282 /* We are always allowed to send one MTU out */
8289 /************************/
8290 /* ASCONF transmission */
8291 /************************/
8292 /* Now first let's go through the asconf queue */
8293 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8294 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8297 if (chk->whoTo == NULL) {
8298 if (asoc->alternate == NULL) {
8299 if (asoc->primary_destination != net) {
8303 if (asoc->alternate != net) {
8308 if (chk->whoTo != net) {
8312 if (chk->data == NULL) {
8315 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8316 chk->sent != SCTP_DATAGRAM_RESEND) {
8320 * if no AUTH is yet included and this chunk
8321 * requires it, make sure to account for it. We
8322 * don't apply the size until the AUTH chunk is
8323 * actually added below in case there is no room for
8324 * this chunk. NOTE: we overload the use of "omtu"
8327 if ((auth == NULL) &&
8328 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8329 stcb->asoc.peer_auth_chunks)) {
8330 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8333 /* Here we do NOT factor the r_mtu */
8334 if ((chk->send_size < (int)(mtu - omtu)) ||
8335 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8337 * We probably should glom the mbuf chain
8338 * from the chk->data for control but the
8339 * problem is it becomes yet one more level
8340 * of tracking to do if for some reason
8341 * output fails. Then I have got to
8342 * reconstruct the merged control chain.. el
8343 * yucko.. for now we take the easy way and do the copy.
8347 * Add an AUTH chunk, if chunk requires it
8348 * save the offset into the chain for AUTH
8350 if ((auth == NULL) &&
8351 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8352 stcb->asoc.peer_auth_chunks))) {
8353 outchain = sctp_add_auth_chunk(outchain,
8358 chk->rec.chunk_id.id);
8359 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8361 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8362 (int)chk->rec.chunk_id.can_take_data,
8363 chk->send_size, chk->copy_by_ref);
8364 if (outchain == NULL) {
8366 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8369 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8370 /* update our MTU size */
8371 if (mtu > (chk->send_size + omtu))
8372 mtu -= (chk->send_size + omtu);
8375 to_out += (chk->send_size + omtu);
8376 /* Do clear IP_DF ? */
8377 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8380 if (chk->rec.chunk_id.can_take_data)
8383 * set hb flag since we can use these for RTO.
8389 * should sysctl this: don't
8390 * bundle data with ASCONF
8391 * since it requires AUTH
8394 chk->sent = SCTP_DATAGRAM_SENT;
8395 if (chk->whoTo == NULL) {
8397 atomic_add_int(&net->ref_count, 1);
8402 * Ok we are out of room but we can
8403 * output without affecting the
8404 * flight size since this little guy
8405 * is a control only packet.
8407 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8409 * do NOT clear the asconf
8410 * flag as it is used to do
8411 * appropriate source address
8414 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8415 (struct sockaddr *)&net->ro._l_addr,
8416 outchain, auth_offset, auth,
8417 stcb->asoc.authinfo.active_keyid,
8418 no_fragmentflg, 0, asconf,
8419 inp->sctp_lport, stcb->rport,
8420 htonl(stcb->asoc.peer_vtag),
8422 #if defined(__FreeBSD__)
8426 if (error == ENOBUFS) {
8427 asoc->ifp_had_enobuf = 1;
8428 SCTP_STAT_INCR(sctps_lowlevelerr);
8430 if (from_where == 0) {
8431 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8433 if (*now_filled == 0) {
8434 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8436 *now = net->last_sent_time;
8438 net->last_sent_time = *now;
8441 /* error, could not output */
8442 if (error == EHOSTUNREACH) {
8448 sctp_move_chunks_from_net(stcb, net);
8453 asoc->ifp_had_enobuf = 0;
8454 if (*now_filled == 0) {
8455 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8457 *now = net->last_sent_time;
8459 net->last_sent_time = *now;
8463 * increase the number we sent, if a
8464 * cookie is sent we don't tell them
8467 outchain = endoutchain = NULL;
8471 *num_out += ctl_cnt;
8472 /* recalc a clean slate and setup */
8473 switch (net->ro._l_addr.sa.sa_family) {
8476 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8481 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8484 #if defined(__Userspace__)
8486 mtu = net->mtu - sizeof(struct sctphdr);
8499 /************************/
8500 /* Control transmission */
8501 /************************/
8502 /* Now first let's go through the control queue */
8503 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8504 if ((sack_goes_to) &&
8505 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8506 (chk->whoTo != sack_goes_to)) {
8508 * if we have a sack in queue, and we are looking at an
8509 * ecn echo that is NOT queued to where the sack is going..
8511 if (chk->whoTo == net) {
8512 /* Don't transmit it to where its going (current net) */
8514 } else if (sack_goes_to == net) {
8515 /* But do transmit it to this address */
8516 goto skip_net_check;
8519 if (chk->whoTo == NULL) {
8520 if (asoc->alternate == NULL) {
8521 if (asoc->primary_destination != net) {
8525 if (asoc->alternate != net) {
8530 if (chk->whoTo != net) {
8535 if (chk->data == NULL) {
8538 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8540 * It must be unsent. Cookies and ASCONF's
8541 * hang around, but their timers will fire
8542 * when marked for resend.
8547 * if no AUTH is yet included and this chunk
8548 * requires it, make sure to account for it. We
8549 * don't apply the size until the AUTH chunk is
8550 * actually added below in case there is no room for
8551 * this chunk. NOTE: we overload the use of "omtu"
8554 if ((auth == NULL) &&
8555 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8556 stcb->asoc.peer_auth_chunks)) {
8557 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8560 /* Here we do NOT factor the r_mtu */
8561 if ((chk->send_size <= (int)(mtu - omtu)) ||
8562 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8564 * We probably should glom the mbuf chain
8565 * from the chk->data for control but the
8566 * problem is it becomes yet one more level
8567 * of tracking to do if for some reason
8568 * output fails. Then I have got to
8569 * reconstruct the merged control chain.. el
8570 * yucko.. for now we take the easy way and do the copy.
8574 * Add an AUTH chunk, if chunk requires it
8575 * save the offset into the chain for AUTH
8577 if ((auth == NULL) &&
8578 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8579 stcb->asoc.peer_auth_chunks))) {
8580 outchain = sctp_add_auth_chunk(outchain,
8585 chk->rec.chunk_id.id);
8586 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8588 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8589 (int)chk->rec.chunk_id.can_take_data,
8590 chk->send_size, chk->copy_by_ref);
8591 if (outchain == NULL) {
8593 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8596 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8597 /* update our MTU size */
8598 if (mtu > (chk->send_size + omtu))
8599 mtu -= (chk->send_size + omtu);
8602 to_out += (chk->send_size + omtu);
8603 /* Do clear IP_DF ? */
8604 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8607 if (chk->rec.chunk_id.can_take_data)
8609 /* Mark things to be removed, if needed */
8610 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8611 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8612 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8613 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8614 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8615 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8616 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8617 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8618 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8619 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8620 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8621 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8624 /* remove these chunks at the end */
8625 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8626 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8627 /* turn off the timer */
8628 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8629 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8630 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1);
8636 * Other chunks, since they have
8637 * timers running (i.e. COOKIE)
8638 * we just "trust" that they
8639 * get sent or retransmitted.
8642 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8645 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8647 * Increment ecne send count here
8648 * this means we may be over-zealous in
8649 * our counting if the send fails, but it's
8650 * the best place to do it (we used to do
8651 * it in the queue of the chunk, but that did
8652 * not tell how many times it was sent).
8654 SCTP_STAT_INCR(sctps_sendecne);
8656 chk->sent = SCTP_DATAGRAM_SENT;
8657 if (chk->whoTo == NULL) {
8659 atomic_add_int(&net->ref_count, 1);
8665 * Ok we are out of room but we can
8666 * output without affecting the
8667 * flight size since this little guy
8668 * is a control only packet.
8671 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8673 * do NOT clear the asconf
8674 * flag as it is used to do
8675 * appropriate source address
8680 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8683 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8684 (struct sockaddr *)&net->ro._l_addr,
8687 stcb->asoc.authinfo.active_keyid,
8688 no_fragmentflg, 0, asconf,
8689 inp->sctp_lport, stcb->rport,
8690 htonl(stcb->asoc.peer_vtag),
8692 #if defined(__FreeBSD__)
8696 if (error == ENOBUFS) {
8697 asoc->ifp_had_enobuf = 1;
8698 SCTP_STAT_INCR(sctps_lowlevelerr);
8700 if (from_where == 0) {
8701 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8703 /* error, could not output */
8705 if (*now_filled == 0) {
8706 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8708 *now = net->last_sent_time;
8710 net->last_sent_time = *now;
8714 if (error == EHOSTUNREACH) {
8720 sctp_move_chunks_from_net(stcb, net);
8725 asoc->ifp_had_enobuf = 0;
8726 /* Only HB or ASCONF advances time */
8728 if (*now_filled == 0) {
8729 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8731 *now = net->last_sent_time;
8733 net->last_sent_time = *now;
8738 * increase the number we sent, if a
8739 * cookie is sent we don't tell them
8742 outchain = endoutchain = NULL;
8746 *num_out += ctl_cnt;
8747 /* recalc a clean slate and setup */
8748 switch (net->ro._l_addr.sa.sa_family) {
8751 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8756 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8759 #if defined(__Userspace__)
8761 mtu = net->mtu - sizeof(struct sctphdr);
8774 /* JRI: if dest is in PF state, do not send data to it */
8775 if ((asoc->sctp_cmt_on_off > 0) &&
8776 (net != stcb->asoc.alternate) &&
8777 (net->dest_state & SCTP_ADDR_PF)) {
8780 if (net->flight_size >= net->cwnd) {
8783 if ((asoc->sctp_cmt_on_off > 0) &&
8784 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8785 (net->flight_size > max_rwnd_per_dest)) {
8789 * We need a specific accounting for the usage of the
8790 * send buffer. We also need to check the number of messages
8791 * per net. For now, this is better than nothing and it
8792 * is disabled by default...
8794 if ((asoc->sctp_cmt_on_off > 0) &&
8795 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8796 (max_send_per_dest > 0) &&
8797 (net->flight_size > max_send_per_dest)) {
8800 /*********************/
8801 /* Data transmission */
8802 /*********************/
8804 * if AUTH for DATA is required and no AUTH has been added
8805 * yet, account for this in the mtu now... if no data can be
8806 * bundled, this adjustment won't matter anyways since the
8807 * packet will be going out...
8809 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8810 stcb->asoc.peer_auth_chunks);
8811 if (data_auth_reqd && (auth == NULL)) {
8812 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8814 /* now lets add any data within the MTU constraints */
8815 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8818 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8819 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8826 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8827 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8832 #if defined(__Userspace__)
8834 if (net->mtu > sizeof(struct sctphdr)) {
8835 omtu = net->mtu - sizeof(struct sctphdr);
8846 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8847 (skip_data_for_this_net == 0)) ||
8849 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8850 if (no_data_chunks) {
8851 /* let only control go out */
8855 if (net->flight_size >= net->cwnd) {
8856 /* skip this net, no room for data */
8860 if ((chk->whoTo != NULL) &&
8861 (chk->whoTo != net)) {
8862 /* Don't send the chunk on this net */
8866 if (asoc->sctp_cmt_on_off == 0) {
8867 if ((asoc->alternate) &&
8868 (asoc->alternate != net) &&
8869 (chk->whoTo == NULL)) {
8871 } else if ((net != asoc->primary_destination) &&
8872 (asoc->alternate == NULL) &&
8873 (chk->whoTo == NULL)) {
8877 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8879 * strange, we have a chunk that is
8880 * too big for its destination and
8881 * yet no fragment ok flag.
8882 * Something went wrong when the
8883 * PMTU changed...we did not mark
8884 * this chunk for some reason?? I
8885 * will fix it here by letting IP
8886 * fragment it for now and printing
8887 * a warning. This really should not happen.
8890 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8891 chk->send_size, mtu);
8892 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8894 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8895 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8896 struct sctp_data_chunk *dchkh;
8898 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8899 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8901 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8902 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8903 /* ok we will add this one */
8906 * Add an AUTH chunk, if chunk
8907 * requires it, save the offset into
8908 * the chain for AUTH
8910 if (data_auth_reqd) {
8912 outchain = sctp_add_auth_chunk(outchain,
8918 auth_keyid = chk->auth_keyid;
8920 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8921 } else if (override_ok) {
8922 /* use this data's keyid */
8923 auth_keyid = chk->auth_keyid;
8925 } else if (auth_keyid != chk->auth_keyid) {
8926 /* different keyid, so done bundling */
8930 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8931 chk->send_size, chk->copy_by_ref);
8932 if (outchain == NULL) {
8933 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8934 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8935 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8938 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8941 /* update our MTU size */
8942 /* Do clear IP_DF ? */
8943 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8946 /* unsigned subtraction of mtu */
8947 if (mtu > chk->send_size)
8948 mtu -= chk->send_size;
8951 /* unsigned subtraction of r_mtu */
8952 if (r_mtu > chk->send_size)
8953 r_mtu -= chk->send_size;
8957 to_out += chk->send_size;
8958 if ((to_out > mx_mtu) && no_fragmentflg) {
8960 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8962 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8966 chk->window_probe = 0;
8967 data_list[bundle_at++] = chk;
8968 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8971 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8972 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8973 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8975 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8977 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8978 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8979 /* Count the number of user msgs that were fragmented;
8980 * we do this by counting when we see a LAST fragment
8983 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8985 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8986 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8987 data_list[0]->window_probe = 1;
8988 net->window_probe = 1;
8994 * Must be sent in order of the
8995 * TSN's (on a network)
8999 } /* for (chunk gather loop for this net) */
9000 } /* if asoc.state OPEN */
9002 /* Is there something to send for this destination? */
9004 /* We may need to start a control timer or two */
9006 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9009 * do NOT clear the asconf flag as it is used
9010 * to do appropriate source address selection.
9014 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9017 /* must start a send timer if data is being sent */
9018 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9020 * no timer running on this destination
9023 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9025 /* Now send it, if there is anything to send :> */
9026 if ((error = sctp_lowlevel_chunk_output(inp,
9029 (struct sockaddr *)&net->ro._l_addr,
9037 inp->sctp_lport, stcb->rport,
9038 htonl(stcb->asoc.peer_vtag),
9040 #if defined(__FreeBSD__)
9044 /* error, we could not output */
9045 if (error == ENOBUFS) {
9046 SCTP_STAT_INCR(sctps_lowlevelerr);
9047 asoc->ifp_had_enobuf = 1;
9049 if (from_where == 0) {
9050 SCTP_STAT_INCR(sctps_lowlevelerrusr);
9052 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9054 if (*now_filled == 0) {
9055 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
9057 *now = net->last_sent_time;
9059 net->last_sent_time = *now;
9063 if (error == EHOSTUNREACH) {
9065 * Destination went unreachable
9068 sctp_move_chunks_from_net(stcb, net);
9072 * I add this line to be paranoid. As far as
9073 * I can tell the continue takes us back to
9074 * the top of the for, but just to make sure
9075 * I will reset these again here.
9077 ctl_cnt = bundle_at = 0;
9078 continue; /* This takes us back to the for() for the nets. */
9080 asoc->ifp_had_enobuf = 0;
9085 if (bundle_at || hbflag) {
9086 /* For data/asconf and hb set time */
9087 if (*now_filled == 0) {
9088 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
9090 *now = net->last_sent_time;
9092 net->last_sent_time = *now;
9096 *num_out += (ctl_cnt + bundle_at);
9099 /* setup for a RTO measurement */
9100 tsns_sent = data_list[0]->rec.data.TSN_seq;
9101 /* fill time if not already filled */
9102 if (*now_filled == 0) {
9103 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9105 *now = asoc->time_last_sent;
9107 asoc->time_last_sent = *now;
9109 if (net->rto_needed) {
9110 data_list[0]->do_rtt = 1;
9111 net->rto_needed = 0;
9113 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9114 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9121 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9124 if (old_start_at == NULL) {
9125 old_start_at = start_at;
9126 start_at = TAILQ_FIRST(&asoc->nets);
9128 goto again_one_more_time;
9132 * At the end there should be no NON timed chunks hanging on this queue.
9135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9136 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9138 if ((*num_out == 0) && (*reason_code == 0)) {
9143 sctp_clean_up_ctl(stcb, asoc, so_locked);
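/*
 * Illustrative sketch, not part of the original sources: the CMT code
 * above resumes sending at the net after asoc->last_net_cmt_send_started
 * and wraps to the head of the list when it runs off the end.  The
 * standalone helper below shows the same resume-and-wrap selection on a
 * hypothetical singly linked list of destinations; "struct dest" and its
 * "next" field are assumptions for the example, not kernel types.
 */
#include <stddef.h>

struct dest {
	struct dest *next;	/* NULL-terminated list of destinations */
};

static struct dest *
pick_cmt_start(struct dest *head, struct dest **last_started)
{
	struct dest *start;

	if (*last_started == NULL) {
		/* never sent before: start at the beginning */
		start = head;
	} else {
		/* resume after the last net we started with... */
		start = (*last_started)->next;
		if (start == NULL) {
			/* ...wrapping to the head at the end of the list */
			start = head;
		}
	}
	*last_started = start;	/* remember the start point for next time */
	return (start);
}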
9148 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9151 * Prepend an OPERATIONAL_ERROR chunk header and put on the end of
9152 * the control chunk queue.
9154 struct sctp_chunkhdr *hdr;
9155 struct sctp_tmit_chunk *chk;
9158 SCTP_TCB_LOCK_ASSERT(stcb);
9159 sctp_alloc_a_chunk(stcb, chk);
9162 sctp_m_freem(op_err);
9165 chk->copy_by_ref = 0;
9166 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9167 if (op_err == NULL) {
9168 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
9173 while (mat != NULL) {
9174 chk->send_size += SCTP_BUF_LEN(mat);
9175 mat = SCTP_BUF_NEXT(mat);
9177 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9178 chk->rec.chunk_id.can_take_data = 1;
9179 chk->sent = SCTP_DATAGRAM_UNSENT;
9182 chk->asoc = &stcb->asoc;
9185 hdr = mtod(op_err, struct sctp_chunkhdr *);
9186 hdr->chunk_type = SCTP_OPERATION_ERROR;
9187 hdr->chunk_flags = 0;
9188 hdr->chunk_length = htons(chk->send_size);
9189 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
9192 chk->asoc->ctrl_queue_cnt++;
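/*
 * Illustrative sketch, not part of the original sources:
 * sctp_queue_op_err() above sizes the chunk by walking the mbuf chain
 * and summing SCTP_BUF_LEN() over every link.  The standalone version
 * below performs the same walk over a hypothetical "struct buf" chain;
 * the type and field names are assumptions for the example only.
 */
#include <stddef.h>

struct buf {
	struct buf *next;	/* next buffer in the chain, or NULL */
	int len;		/* valid bytes in this buffer */
};

static int
chain_length(const struct buf *head)
{
	const struct buf *mat;
	int total = 0;

	for (mat = head; mat != NULL; mat = mat->next)
		total += mat->len;	/* sum every link, as the loop above does */
	return (total);
}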
9196 sctp_send_cookie_echo(struct mbuf *m,
9198 struct sctp_tcb *stcb,
9199 struct sctp_nets *net)
9202 * pull out the cookie and put it at the front of the control chunk
9206 struct mbuf *cookie;
9207 struct sctp_paramhdr parm, *phdr;
9208 struct sctp_chunkhdr *hdr;
9209 struct sctp_tmit_chunk *chk;
9210 uint16_t ptype, plen;
9212 /* First find the cookie in the param area */
9214 at = offset + sizeof(struct sctp_init_chunk);
9216 SCTP_TCB_LOCK_ASSERT(stcb);
9218 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
9222 ptype = ntohs(phdr->param_type);
9223 plen = ntohs(phdr->param_length);
9224 if (ptype == SCTP_STATE_COOKIE) {
9227 /* found the cookie */
9228 if ((pad = (plen % 4))) {
9231 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9232 if (cookie == NULL) {
9236 #ifdef SCTP_MBUF_LOGGING
9237 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9240 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
9241 if (SCTP_BUF_IS_EXTENDED(mat)) {
9242 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9249 at += SCTP_SIZE32(plen);
9251 if (cookie == NULL) {
9252 /* Did not find the cookie */
9255 /* ok, we got the cookie lets change it into a cookie echo chunk */
9257 /* first the change from param to cookie */
9258 hdr = mtod(cookie, struct sctp_chunkhdr *);
9259 hdr->chunk_type = SCTP_COOKIE_ECHO;
9260 hdr->chunk_flags = 0;
9261 /* get the chunk stuff now and place it in the FRONT of the queue */
9262 sctp_alloc_a_chunk(stcb, chk);
9265 sctp_m_freem(cookie);
9268 chk->copy_by_ref = 0;
9269 chk->send_size = plen;
9270 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9271 chk->rec.chunk_id.can_take_data = 0;
9272 chk->sent = SCTP_DATAGRAM_UNSENT;
9274 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9275 chk->asoc = &stcb->asoc;
9278 atomic_add_int(&chk->whoTo->ref_count, 1);
9279 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9280 chk->asoc->ctrl_queue_cnt++;
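/*
 * Illustrative sketch, not part of the original sources:
 * sctp_send_cookie_echo() above locates the state cookie by walking the
 * parameter TLVs, advancing by each length rounded up to a 4-byte
 * boundary (what SCTP_SIZE32() does).  A minimal flat-buffer version of
 * that walk, assuming the packed RFC 4960 parameter layout:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

/* returns the offset of the first parameter of type "want", or -1 */
static long
find_param(const uint8_t *buf, size_t buflen, uint16_t want)
{
	size_t at = 0;

	while (at + 4 <= buflen) {
		uint16_t ptype, plen;

		memcpy(&ptype, buf + at, 2);	/* TLV type, network order */
		memcpy(&plen, buf + at + 2, 2);	/* TLV length, incl. header */
		ptype = ntohs(ptype);
		plen = ntohs(plen);
		if (plen < 4 || at + plen > buflen)
			return (-1);		/* malformed parameter */
		if (ptype == want)
			return ((long)at);
		at += (plen + 3) & ~(size_t)3;	/* SCTP_SIZE32-style rounding */
	}
	return (-1);
}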
9285 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9289 struct sctp_nets *net)
9292 * take a HB request and make it into a HB ack and send it.
9294 struct mbuf *outchain;
9295 struct sctp_chunkhdr *chdr;
9296 struct sctp_tmit_chunk *chk;
9300 /* must have a net pointer */
9303 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9304 if (outchain == NULL) {
9305 /* gak out of memory */
9308 #ifdef SCTP_MBUF_LOGGING
9309 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9312 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
9313 if (SCTP_BUF_IS_EXTENDED(mat)) {
9314 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9319 chdr = mtod(outchain, struct sctp_chunkhdr *);
9320 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9321 chdr->chunk_flags = 0;
9322 if (chk_length % 4) {
9324 uint32_t cpthis = 0;
9327 padlen = 4 - (chk_length % 4);
9328 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9330 sctp_alloc_a_chunk(stcb, chk);
9333 sctp_m_freem(outchain);
9336 chk->copy_by_ref = 0;
9337 chk->send_size = chk_length;
9338 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9339 chk->rec.chunk_id.can_take_data = 1;
9340 chk->sent = SCTP_DATAGRAM_UNSENT;
9343 chk->asoc = &stcb->asoc;
9344 chk->data = outchain;
9346 atomic_add_int(&chk->whoTo->ref_count, 1);
9347 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9348 chk->asoc->ctrl_queue_cnt++;
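/*
 * Illustrative sketch, not part of the original sources: the
 * heartbeat-ack path above pads the copied chunk to a 4-byte boundary
 * by appending "4 - (chk_length % 4)" zero bytes.  The same arithmetic
 * on a flat buffer, for illustration:
 */
#include <stddef.h>
#include <string.h>

/* pads buf (of capacity cap) in place; returns the padded length */
static size_t
pad_to_word(unsigned char *buf, size_t len, size_t cap)
{
	size_t padlen;

	if (len % 4 == 0)
		return (len);		/* already aligned, nothing to do */
	padlen = 4 - (len % 4);		/* 1..3 bytes of zero padding */
	if (len + padlen > cap)
		return (len);		/* no room: caller must grow the buffer */
	memset(buf + len, 0, padlen);
	return (len + padlen);
}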
9352 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9354 /* formulate and queue a cookie-ack back to sender */
9355 struct mbuf *cookie_ack;
9356 struct sctp_chunkhdr *hdr;
9357 struct sctp_tmit_chunk *chk;
9360 SCTP_TCB_LOCK_ASSERT(stcb);
9362 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9363 if (cookie_ack == NULL) {
9367 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9368 sctp_alloc_a_chunk(stcb, chk);
9371 sctp_m_freem(cookie_ack);
9374 chk->copy_by_ref = 0;
9375 chk->send_size = sizeof(struct sctp_chunkhdr);
9376 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9377 chk->rec.chunk_id.can_take_data = 1;
9378 chk->sent = SCTP_DATAGRAM_UNSENT;
9381 chk->asoc = &stcb->asoc;
9382 chk->data = cookie_ack;
9383 if (chk->asoc->last_control_chunk_from != NULL) {
9384 chk->whoTo = chk->asoc->last_control_chunk_from;
9385 atomic_add_int(&chk->whoTo->ref_count, 1);
9389 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9390 hdr->chunk_type = SCTP_COOKIE_ACK;
9391 hdr->chunk_flags = 0;
9392 hdr->chunk_length = htons(chk->send_size);
9393 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9394 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9395 chk->asoc->ctrl_queue_cnt++;
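/*
 * Illustrative sketch, not part of the original sources: the COOKIE-ACK
 * builder above, and the SHUTDOWN/SHUTDOWN-ACK builders that follow,
 * all fill the common chunk header the same way: type, zeroed flags,
 * then the length in network byte order.  Stand-alone, over the RFC
 * 4960 common header layout:
 */
#include <stdint.h>
#include <arpa/inet.h>

struct chunk_hdr {
	uint8_t type;
	uint8_t flags;
	uint16_t length;	/* network byte order on the wire */
};

static void
fill_chunk_hdr(struct chunk_hdr *hdr, uint8_t type, uint16_t send_size)
{
	hdr->type = type;
	hdr->flags = 0;
	hdr->length = htons(send_size);	/* length includes this header */
}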
9401 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9403 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9404 struct mbuf *m_shutdown_ack;
9405 struct sctp_shutdown_ack_chunk *ack_cp;
9406 struct sctp_tmit_chunk *chk;
9408 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9409 if (m_shutdown_ack == NULL) {
9413 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9414 sctp_alloc_a_chunk(stcb, chk);
9417 sctp_m_freem(m_shutdown_ack);
9420 chk->copy_by_ref = 0;
9421 chk->send_size = sizeof(struct sctp_chunkhdr);
9422 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9423 chk->rec.chunk_id.can_take_data = 1;
9424 chk->sent = SCTP_DATAGRAM_UNSENT;
9427 chk->asoc = &stcb->asoc;
9428 chk->data = m_shutdown_ack;
9431 atomic_add_int(&chk->whoTo->ref_count, 1);
9433 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9434 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9435 ack_cp->ch.chunk_flags = 0;
9436 ack_cp->ch.chunk_length = htons(chk->send_size);
9437 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9438 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9439 chk->asoc->ctrl_queue_cnt++;
9444 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9446 /* formulate and queue a SHUTDOWN to the sender */
9447 struct mbuf *m_shutdown;
9448 struct sctp_shutdown_chunk *shutdown_cp;
9449 struct sctp_tmit_chunk *chk;
9451 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9452 if (m_shutdown == NULL) {
9456 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9457 sctp_alloc_a_chunk(stcb, chk);
9460 sctp_m_freem(m_shutdown);
9463 chk->copy_by_ref = 0;
9464 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9465 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9466 chk->rec.chunk_id.can_take_data = 1;
9467 chk->sent = SCTP_DATAGRAM_UNSENT;
9470 chk->asoc = &stcb->asoc;
9471 chk->data = m_shutdown;
9474 atomic_add_int(&chk->whoTo->ref_count, 1);
9476 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9477 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9478 shutdown_cp->ch.chunk_flags = 0;
9479 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9480 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9481 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9482 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9483 chk->asoc->ctrl_queue_cnt++;
9488 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9491 * formulate and queue an ASCONF to the peer.
9492 * ASCONF parameters should be queued on the assoc queue.
9494 struct sctp_tmit_chunk *chk;
9495 struct mbuf *m_asconf;
9498 SCTP_TCB_LOCK_ASSERT(stcb);
9500 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9501 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9502 /* can't send a new one if there is one in flight already */
9506 /* compose an ASCONF chunk, maximum length is PMTU */
9507 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9508 if (m_asconf == NULL) {
9512 sctp_alloc_a_chunk(stcb, chk);
9515 sctp_m_freem(m_asconf);
9519 chk->copy_by_ref = 0;
9520 chk->data = m_asconf;
9521 chk->send_size = len;
9522 chk->rec.chunk_id.id = SCTP_ASCONF;
9523 chk->rec.chunk_id.can_take_data = 0;
9524 chk->sent = SCTP_DATAGRAM_UNSENT;
9526 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9527 chk->asoc = &stcb->asoc;
9530 atomic_add_int(&chk->whoTo->ref_count, 1);
9532 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9533 chk->asoc->ctrl_queue_cnt++;
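/*
 * Illustrative sketch, not part of the original sources:
 * sctp_send_asconf() above refuses to queue a new ASCONF while another
 * is still outstanding unless SCTP_PCB_FLAGS_MULTIPLE_ASCONFS is on.
 * Reduced to a standalone predicate (the parameter names are
 * assumptions for the example):
 */
static int
may_queue_asconf(int asconf_queue_nonempty, int multiple_asconfs_on)
{
	/* only one ASCONF may be in flight unless the peer handles more */
	return (!asconf_queue_nonempty || multiple_asconfs_on);
}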
9538 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9541 * formulate and queue an asconf-ack back to the sender.
9542 * the asconf-ack must be stored in the tcb.
9544 struct sctp_tmit_chunk *chk;
9545 struct sctp_asconf_ack *ack, *latest_ack;
9547 struct sctp_nets *net = NULL;
9549 SCTP_TCB_LOCK_ASSERT(stcb);
9550 /* Get the latest ASCONF-ACK */
9551 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9552 if (latest_ack == NULL) {
9555 if (latest_ack->last_sent_to != NULL &&
9556 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9557 /* we're doing a retransmission */
9558 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9561 if (stcb->asoc.last_control_chunk_from == NULL) {
9562 if (stcb->asoc.alternate) {
9563 net = stcb->asoc.alternate;
9565 net = stcb->asoc.primary_destination;
9568 net = stcb->asoc.last_control_chunk_from;
9573 if (stcb->asoc.last_control_chunk_from == NULL) {
9574 if (stcb->asoc.alternate) {
9575 net = stcb->asoc.alternate;
9577 net = stcb->asoc.primary_destination;
9580 net = stcb->asoc.last_control_chunk_from;
9583 latest_ack->last_sent_to = net;
9585 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9586 if (ack->data == NULL) {
9590 /* copy the asconf_ack */
9591 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9592 if (m_ack == NULL) {
9593 /* couldn't copy it */
9596 #ifdef SCTP_MBUF_LOGGING
9597 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9600 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9601 if (SCTP_BUF_IS_EXTENDED(mat)) {
9602 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9608 sctp_alloc_a_chunk(stcb, chk);
9612 sctp_m_freem(m_ack);
9615 chk->copy_by_ref = 0;
9619 atomic_add_int(&chk->whoTo->ref_count, 1);
9624 chk->send_size = ack->len;
9625 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9626 chk->rec.chunk_id.can_take_data = 1;
9627 chk->sent = SCTP_DATAGRAM_UNSENT;
9629 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9630 chk->asoc = &stcb->asoc;
9632 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9633 chk->asoc->ctrl_queue_cnt++;
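/*
 * Illustrative sketch, not part of the original sources: the
 * destination logic in sctp_send_asconf_ack() above answers a first
 * ASCONF at its source, but on a retransmission (the latest ack already
 * went back to that source) it looks for an alternate path instead.
 * Reduced to a standalone decision with hypothetical names; the kernel
 * uses sctp_find_alternate_net() for the alternate lookup:
 */
struct path;			/* opaque destination, for the sketch only */

static struct path *
pick_ack_dest(struct path *came_from, struct path *last_sent_to,
    struct path *alternate, struct path *primary)
{
	if ((last_sent_to != NULL) && (last_sent_to == came_from) &&
	    (alternate != NULL))
		return (alternate);	/* retransmission: try another path */
	if (came_from != NULL)
		return (came_from);	/* first send: answer the source */
	return ((alternate != NULL) ? alternate : primary);
}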
9640 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9641 struct sctp_tcb *stcb,
9642 struct sctp_association *asoc,
9643 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9644 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9650 * send out one MTU of retransmission. If fast_retransmit is
9651 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9652 * rwnd. For a Cookie or Asconf in the control chunk queue we
9653 * retransmit them by themselves.
9655 * For data chunks we will pick out the lowest TSN's in the sent_queue
9656 * marked for resend and bundle them all together (up to one MTU for the
9657 * destination). The address to send to should have been
9658 * selected/changed where the retransmission was marked (i.e. in FR
9659 * or t3-timeout routines).
9661 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9662 struct sctp_tmit_chunk *chk, *fwd;
9663 struct mbuf *m, *endofchain;
9664 struct sctp_nets *net = NULL;
9665 uint32_t tsns_sent = 0;
9666 int no_fragmentflg, bundle_at, cnt_thru;
9668 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9669 struct sctp_auth_chunk *auth = NULL;
9670 uint32_t auth_offset = 0;
9671 uint16_t auth_keyid;
9672 int override_ok = 1;
9673 int data_auth_reqd = 0;
9676 #if defined(__APPLE__)
9678 sctp_lock_assert(SCTP_INP_SO(inp));
9680 sctp_unlock_assert(SCTP_INP_SO(inp));
9683 SCTP_TCB_LOCK_ASSERT(stcb);
9684 tmr_started = ctl_cnt = bundle_at = error = 0;
9689 endofchain = m = NULL;
9690 auth_keyid = stcb->asoc.authinfo.active_keyid;
9691 #ifdef SCTP_AUDITING_ENABLED
9692 sctp_audit_log(0xC3, 1);
9694 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9695 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9696 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
9697 asoc->sent_queue_retran_cnt);
9698 asoc->sent_queue_cnt = 0;
9699 asoc->sent_queue_cnt_removeable = 0;
9700 /* send back 0/0 so we enter normal transmission */
9704 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9705 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9706 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9707 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9708 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9711 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9712 if (chk != asoc->str_reset) {
9714 * not eligible for retran if it's not ours.
9721 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9725 * Add an AUTH chunk, if chunk requires it save the
9726 * offset into the chain for AUTH
9728 if ((auth == NULL) &&
9729 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9730 stcb->asoc.peer_auth_chunks))) {
9731 m = sctp_add_auth_chunk(m, &endofchain,
9732 &auth, &auth_offset,
9734 chk->rec.chunk_id.id);
9735 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9737 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9743 /* do we have control chunks to retransmit? */
9745 /* Start a timer no matter if we succeed or fail */
9746 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9747 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9748 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9749 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9750 chk->snd_count++; /* update our count */
9751 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9752 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9753 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9754 no_fragmentflg, 0, 0,
9755 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9756 chk->whoTo->port, NULL,
9757 #if defined(__FreeBSD__)
9761 SCTP_STAT_INCR(sctps_lowlevelerr);
9768 * We don't want to mark the net->sent time here since
9769 * we use this for HB and retrans cannot measure RTT
9771 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9773 chk->sent = SCTP_DATAGRAM_SENT;
9774 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9778 /* Clean up the fwd-tsn list */
9779 sctp_clean_up_ctl(stcb, asoc, so_locked);
9784 * Ok, it is just data retransmission we need to do or that and a
9785 * fwd-tsn with it all.
9787 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9788 return (SCTP_RETRAN_DONE);
9790 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9791 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9792 /* not yet open, resend the cookie and that is it */
9795 #ifdef SCTP_AUDITING_ENABLED
9796 sctp_auditing(20, inp, stcb, NULL);
9798 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9799 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9800 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9801 /* No, not sent to this net or not ready for rtx */
9804 if (chk->data == NULL) {
9805 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9806 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9809 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9810 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9811 /* Gak, we have exceeded max unlucky retran, abort! */
9812 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9814 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9815 atomic_add_int(&stcb->asoc.refcnt, 1);
9816 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9817 SCTP_TCB_LOCK(stcb);
9818 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9819 return (SCTP_RETRAN_EXIT);
9821 /* pick up the net */
9823 switch (net->ro._l_addr.sa.sa_family) {
9826 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9831 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9834 #if defined(__Userspace__)
9836 mtu = net->mtu - sizeof(struct sctphdr);
9845 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9846 /* No room in peers rwnd */
9849 tsn = asoc->last_acked_seq + 1;
9850 if (tsn == chk->rec.data.TSN_seq) {
9852 * we make a special exception for this
9853 * case. The peer has no rwnd but is missing
9854 * the lowest chunk.. which is probably what
9855 * is holding up the rwnd.
9857 goto one_chunk_around;
9862 if (asoc->peers_rwnd < mtu) {
9864 if ((asoc->peers_rwnd == 0) &&
9865 (asoc->total_flight == 0)) {
9866 chk->window_probe = 1;
9867 chk->whoTo->window_probe = 1;
9870 #ifdef SCTP_AUDITING_ENABLED
9871 sctp_audit_log(0xC3, 2);
9875 net->fast_retran_ip = 0;
9876 if (chk->rec.data.doing_fast_retransmit == 0) {
9878 * if no FR in progress skip destinations that have
9879 * flight_size > cwnd.
9881 if (net->flight_size >= net->cwnd) {
9886 * Mark the destination net to have FR recovery
9890 net->fast_retran_ip = 1;
9894 * if no AUTH is yet included and this chunk requires it,
9895 * make sure to account for it. We don't apply the size
9896 * until the AUTH chunk is actually added below in case
9897 * there is no room for this chunk.
9899 if (data_auth_reqd && (auth == NULL)) {
9900 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9904 if ((chk->send_size <= (mtu - dmtu)) ||
9905 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9906 /* ok we will add this one */
9907 if (data_auth_reqd) {
9909 m = sctp_add_auth_chunk(m,
9915 auth_keyid = chk->auth_keyid;
9917 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9918 } else if (override_ok) {
9919 auth_keyid = chk->auth_keyid;
9921 } else if (chk->auth_keyid != auth_keyid) {
9922 /* different keyid, so done bundling */
9926 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9928 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9931 /* Do clear IP_DF ? */
9932 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9935 /* update our MTU size */
9936 if (mtu > (chk->send_size + dmtu))
9937 mtu -= (chk->send_size + dmtu);
9940 data_list[bundle_at++] = chk;
9941 if (one_chunk && (asoc->total_flight <= 0)) {
9942 SCTP_STAT_INCR(sctps_windowprobed);
9945 if (one_chunk == 0) {
9947 * now, are there any more forward from chk to pick up?
9950 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9951 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9952 /* Nope, not for retran */
9955 if (fwd->whoTo != net) {
9956 /* Nope, not the net in question */
9959 if (data_auth_reqd && (auth == NULL)) {
9960 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9963 if (fwd->send_size <= (mtu - dmtu)) {
9964 if (data_auth_reqd) {
9966 m = sctp_add_auth_chunk(m,
9972 auth_keyid = fwd->auth_keyid;
9974 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9975 } else if (override_ok) {
9976 auth_keyid = fwd->auth_keyid;
9978 } else if (fwd->auth_keyid != auth_keyid) {
9979 /* different keyid, so done bundling */
9983 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9985 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9988 /* Do clear IP_DF ? */
9989 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9992 /* update our MTU size */
9993 if (mtu > (fwd->send_size + dmtu))
9994 mtu -= (fwd->send_size + dmtu);
9997 data_list[bundle_at++] = fwd;
9998 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10002 /* can't fit so we are done */
10007 /* Is there something to send for this destination? */
10010 * No matter if we fail or succeed we should start a
10011 * timer. A failure is like a lost IP packet :-)
10013 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10015 * no timer running on this destination
10018 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10021 /* Now lets send it, if there is anything to send :> */
10022 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10023 (struct sockaddr *)&net->ro._l_addr, m,
10024 auth_offset, auth, auth_keyid,
10025 no_fragmentflg, 0, 0,
10026 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10028 #if defined(__FreeBSD__)
10032 /* error, we could not output */
10033 SCTP_STAT_INCR(sctps_lowlevelerr);
10041 * We don't want to mark the net->sent time here
10042 * since we use this for HB and retrans cannot
10045 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10047 /* For auto-close */
10049 if (*now_filled == 0) {
10050 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10051 *now = asoc->time_last_sent;
10054 asoc->time_last_sent = *now;
10056 *cnt_out += bundle_at;
10057 #ifdef SCTP_AUDITING_ENABLED
10058 sctp_audit_log(0xC4, bundle_at);
10061 tsns_sent = data_list[0]->rec.data.TSN_seq;
10063 for (i = 0; i < bundle_at; i++) {
10064 SCTP_STAT_INCR(sctps_sendretransdata);
10065 data_list[i]->sent = SCTP_DATAGRAM_SENT;
10067 * When we have revoked data and we
10068 * retransmit it, we clear the revoked
10069 * flag, since this flag dictates whether we
10070 * subtracted from the flight size.
10072 if (data_list[i]->rec.data.chunk_was_revoked) {
10073 /* Deflate the cwnd */
10074 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10075 data_list[i]->rec.data.chunk_was_revoked = 0;
10077 data_list[i]->snd_count++;
10078 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10079 /* record the time */
10080 data_list[i]->sent_rcv_time = asoc->time_last_sent;
10081 if (data_list[i]->book_size_scale) {
10083 * need to double the book size on
10086 data_list[i]->book_size_scale = 0;
10087 /* Since we double the booksize, we must
10088 * also double the output queue size, since this
10089 * gets shrunk when we free by this amount.
10091 atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10092 data_list[i]->book_size *= 2;
10096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10097 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10098 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10100 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10101 (uint32_t) (data_list[i]->send_size +
10102 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10104 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10105 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10106 data_list[i]->whoTo->flight_size,
10107 data_list[i]->book_size,
10108 (uintptr_t)data_list[i]->whoTo,
10109 data_list[i]->rec.data.TSN_seq);
10111 sctp_flight_size_increase(data_list[i]);
10112 sctp_total_flight_increase(stcb, data_list[i]);
10113 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10114 /* SWS sender side engages */
10115 asoc->peers_rwnd = 0;
10118 (data_list[i]->rec.data.doing_fast_retransmit)) {
10119 SCTP_STAT_INCR(sctps_sendfastretrans);
10120 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10121 (tmr_started == 0)) {
10123 * ok we just fast-retrans'd
10124 * the lowest TSN, i.e. the
10125 * first on the list. In
10126 * this case we want to give
10127 * some more time to get a
10128 * SACK back without a t3 expiring.
10131 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10132 SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4);
10133 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10137 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10138 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10140 #ifdef SCTP_AUDITING_ENABLED
10141 sctp_auditing(21, inp, stcb, NULL);
10144 /* None will fit */
10147 if (asoc->sent_queue_retran_cnt <= 0) {
10148 /* all done we have no more to retran */
10149 asoc->sent_queue_retran_cnt = 0;
10153 /* No more room in rwnd */
10156 /* stop the for loop here. we sent out a packet */
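/*
 * Illustrative sketch, not part of the original sources: the
 * retransmission loop above bundles marked chunks greedily into one
 * MTU, always guarding the unsigned subtraction so "mtu" cannot wrap
 * below zero.  The same greedy fill over a plain array of chunk sizes
 * (an assumption for the example; the kernel walks the sent_queue):
 */
#include <stdint.h>
#include <stddef.h>

/* returns how many leading chunks fit; *mtu_left ends at the remainder */
static size_t
greedy_bundle(const uint32_t *sizes, size_t n, uint32_t *mtu_left)
{
	size_t at;

	for (at = 0; at < n; at++) {
		if (sizes[at] > *mtu_left)
			break;			/* next chunk can't fit: stop */
		/* unsigned subtraction guard, as in the loop above */
		if (*mtu_left > sizes[at])
			*mtu_left -= sizes[at];
		else
			*mtu_left = 0;
	}
	return (at);
}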
10163 sctp_timer_validation(struct sctp_inpcb *inp,
10164 struct sctp_tcb *stcb,
10165 struct sctp_association *asoc)
10167 struct sctp_nets *net;
10169 /* Validate that a timer is running somewhere */
10170 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10171 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10172 /* Here is a timer */
10176 SCTP_TCB_LOCK_ASSERT(stcb);
10177 /* Gak, we did not have a timer somewhere */
10178 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10179 if (asoc->alternate) {
10180 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10182 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10188 sctp_chunk_output (struct sctp_inpcb *inp,
10189 struct sctp_tcb *stcb,
10192 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10198 * Ok this is the generic chunk service queue. We must do the
10200 * - See if there are retransmits pending; if so we must handle those first.
10202 * - Service the stream queue that is next, moving any
10203 * message (note I must get a complete message i.e.
10204 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
10206 * - Check to see if the cwnd/rwnd allows any output, if so we
10207 * go ahead and formulate and send the low level chunks. Making sure
10208 * to combine any control in the control chunk queue also.
10210 struct sctp_association *asoc;
10211 struct sctp_nets *net;
10212 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
10213 unsigned int burst_cnt = 0;
10214 struct timeval now;
10215 int now_filled = 0;
10217 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10220 unsigned int tot_frs = 0;
10222 #if defined(__APPLE__)
10224 sctp_lock_assert(SCTP_INP_SO(inp));
10226 sctp_unlock_assert(SCTP_INP_SO(inp));
10229 asoc = &stcb->asoc;
10230 /* The Nagle algorithm is only applied when handling a send call. */
10231 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10240 SCTP_TCB_LOCK_ASSERT(stcb);
10242 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10244 if ((un_sent <= 0) &&
10245 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10246 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10247 (asoc->sent_queue_retran_cnt == 0)) {
10248 /* Nothing to do unless something is left to be sent */
10251 /* Do we have something to send (data or control) AND
10252 * a sack timer running? If so, piggy-back the sack.
10254 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10255 sctp_send_sack(stcb, so_locked);
10256 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
10258 while (asoc->sent_queue_retran_cnt) {
10260 * Ok, it is retransmission time only, we send out only ONE
10261 * packet with a single call off to the retran code.
10263 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10265 * Special hook for handling cookies discarded
10266 * by peer that carried data. Send cookie-ack only
10267 * and then the next call will get the retrans.
10269 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10271 &now, &now_filled, frag_point, so_locked);
10273 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10274 /* if its not from a HB then do it */
10276 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10282 * it's from any other place, we don't allow retran
10283 * output (only control)
10288 /* Can't send anymore */
10290 * now let's push out control by calling med-level
10291 * output once. This assures that we WILL send HB's
10294 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10296 &now, &now_filled, frag_point, so_locked);
10297 #ifdef SCTP_AUDITING_ENABLED
10298 sctp_auditing(8, inp, stcb, NULL);
10300 sctp_timer_validation(inp, stcb, asoc);
10305 * The count was off.. retran is not happening so do
10306 * the normal retransmission.
10308 #ifdef SCTP_AUDITING_ENABLED
10309 sctp_auditing(9, inp, stcb, NULL);
10311 if (ret == SCTP_RETRAN_EXIT) {
10316 if (from_where == SCTP_OUTPUT_FROM_T3) {
10317 /* Only one transmission allowed out of a timeout */
10318 #ifdef SCTP_AUDITING_ENABLED
10319 sctp_auditing(10, inp, stcb, NULL);
10321 /* Push out any control */
10322 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10323 &now, &now_filled, frag_point, so_locked);
10326 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10327 /* Hit FR burst limit */
10330 if ((num_out == 0) && (ret == 0)) {
10331 /* No more retrans to send */
10335 #ifdef SCTP_AUDITING_ENABLED
10336 sctp_auditing(12, inp, stcb, NULL);
10338 /* Check for bad destinations, if they exist move chunks around. */
10339 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10340 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10342 * if possible move things off of this address; we
10343 * still may send below due to the dormant state but
10344 * we try to find an alternate address to send to
10345 * and if we have one we move all queued data on the
10346 * out wheel to this alternate address.
10348 if (net->ref_count > 1)
10349 sctp_move_chunks_from_net(stcb, net);
10352 * if ((asoc->sat_network) || (net->addr_is_local))
10353 * { burst_limit = asoc->max_burst *
10354 * SCTP_SAT_NETWORK_BURST_INCR; }
10356 if (asoc->max_burst > 0) {
10357 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10358 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10359 /* JRS - Use the congestion control given in the congestion control module */
10360 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10361 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10362 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10364 SCTP_STAT_INCR(sctps_maxburstqueued);
10366 net->fast_retran_ip = 0;
10368 if (net->flight_size == 0) {
10369 /* Should be decaying the cwnd here */
10379 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10380 &reason_code, 0, from_where,
10381 &now, &now_filled, frag_point, so_locked);
10383 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10385 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10388 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10389 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10393 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10395 tot_out += num_out;
10397 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10398 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10399 if (num_out == 0) {
10400 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10405 * When the Nagle algorithm is used, look at how much
10406 * is unsent, then if it's smaller than an MTU and we
10407 * have data in flight we stop, except if we are
10408 * handling a fragmented user message.
10410 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10411 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
10412 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10413 (stcb->asoc.total_flight > 0) &&
10414 ((stcb->asoc.locked_on_sending == NULL) ||
10415 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10419 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10420 TAILQ_EMPTY(&asoc->send_queue) &&
10421 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10422 /* Nothing left to send */
10425 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10426 /* Nothing left to send */
10429 } while (num_out &&
10430 ((asoc->max_burst == 0) ||
10431 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10432 (burst_cnt < asoc->max_burst)));
10434 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10435 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10436 SCTP_STAT_INCR(sctps_maxburstqueued);
10437 asoc->burst_limit_applied = 1;
10438 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10439 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10442 asoc->burst_limit_applied = 0;
10445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10446 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10448 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10452 * Now we need to clean up the control chunk chain if an ECNE is on
10453 * it. It must be marked as UNSENT again so next call will continue
10454 * to send it until such time that we get a CWR, to remove it.
10456 if (stcb->asoc.ecn_echo_cnt_onq)
10457 sctp_fix_ecn_echo(asoc);
10464 struct sctp_inpcb *inp,
10465 #if defined(__Panda__)
10470 struct sockaddr *addr,
10471 #if defined(__Panda__)
10472 pakhandle_type control,
10474 struct mbuf *control,
10476 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
10478 #elif defined(__Windows__)
10481 #if defined(__APPLE__)
10482 struct proc *p SCTP_UNUSED,
10490 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10494 if (inp->sctp_socket == NULL) {
10495 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10498 return (sctp_sosend(inp->sctp_socket,
10500 (struct uio *)NULL,
10503 #if defined(__APPLE__) || defined(__Panda__)
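/*
 * Illustrative sketch, not part of the original sources: the Nagle test
 * in sctp_chunk_output() above stops the burst loop when less than one
 * MTU of user data is unsent while data is still in flight, unless a
 * fragmented (explicit-EOR) message is being filled.  The predicate,
 * stand-alone and with hypothetical parameter names (assumes overhead
 * is smaller than smallest_mtu):
 */
#include <stdint.h>

static int
nagle_says_wait(uint32_t un_sent, uint32_t smallest_mtu, uint32_t overhead,
    uint32_t total_flight, int fragmented_msg_pending)
{
	if (fragmented_msg_pending)
		return (0);	/* never hold back mid-message */
	return ((un_sent < (smallest_mtu - overhead)) && (total_flight > 0));
}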
10512 send_forward_tsn(struct sctp_tcb *stcb,
10513 struct sctp_association *asoc)
10515 struct sctp_tmit_chunk *chk;
10516 struct sctp_forward_tsn_chunk *fwdtsn;
10517 uint32_t advance_peer_ack_point;
10519 SCTP_TCB_LOCK_ASSERT(stcb);
10520 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10521 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10522 /* mark it as unsent */
10523 chk->sent = SCTP_DATAGRAM_UNSENT;
10524 chk->snd_count = 0;
10525 /* Do we correct its output location? */
10527 sctp_free_remote_addr(chk->whoTo);
10530 goto sctp_fill_in_rest;
10533 /* Ok if we reach here we must build one */
10534 sctp_alloc_a_chunk(stcb, chk);
10538 asoc->fwd_tsn_cnt++;
10539 chk->copy_by_ref = 0;
10540 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10541 chk->rec.chunk_id.can_take_data = 0;
10544 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10545 if (chk->data == NULL) {
10546 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10549 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10550 chk->sent = SCTP_DATAGRAM_UNSENT;
10551 chk->snd_count = 0;
10552 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10553 asoc->ctrl_queue_cnt++;
10556 * Here we go through and fill out the part that deals with
10557 * stream/seq of the ones we skip.
10559 SCTP_BUF_LEN(chk->data) = 0;
10561 struct sctp_tmit_chunk *at, *tp1, *last;
10562 struct sctp_strseq *strseq;
10563 unsigned int cnt_of_space, i, ovh;
10564 unsigned int space_needed;
10565 unsigned int cnt_of_skipped = 0;
10567 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10568 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10569 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10570 /* no more to look at */
10573 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10574 /* We don't report these */
10579 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10580 (cnt_of_skipped * sizeof(struct sctp_strseq)));
		cnt_of_space = M_TRAILINGSPACE(chk->data);

		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			ovh = SCTP_MIN_OVERHEAD;
		} else {
			ovh = SCTP_MIN_V4_OVERHEAD;
		}
		if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
			/* trim to a mtu size */
			cnt_of_space = asoc->smallest_mtu - ovh;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
			    0xff, 0, cnt_of_skipped,
			    asoc->advanced_peer_ack_point);
		}
		advance_peer_ack_point = asoc->advanced_peer_ack_point;
		if (cnt_of_space < space_needed) {
			/*-
			 * ok we must trim down the chunk by lowering the
			 * advance peer ack point.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    0xff, 0xff, cnt_of_space,
				    space_needed);
			}
			cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
			cnt_of_skipped /= sizeof(struct sctp_strseq);
			/*-
			 * Go through and find the TSN that will be the one
			 * we report.
			 */
			at = TAILQ_FIRST(&asoc->sent_queue);
			if (at != NULL) {
				for (i = 0; i < cnt_of_skipped; i++) {
					tp1 = TAILQ_NEXT(at, sctp_next);
					if (tp1 == NULL) {
						break;
					}
					at = tp1;
				}
			}
			if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    0xff, cnt_of_skipped, at->rec.data.TSN_seq,
				    asoc->advanced_peer_ack_point);
			}
			last = at;
			/*-
			 * last now points to last one I can report, update
			 * the ack point.
			 */
			if (last)
				advance_peer_ack_point = last->rec.data.TSN_seq;
			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
			    cnt_of_skipped * sizeof(struct sctp_strseq);
		}
		chk->send_size = space_needed;
		/* Setup the chunk */
		fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
		fwdtsn->ch.chunk_length = htons(chk->send_size);
		fwdtsn->ch.chunk_flags = 0;
		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
		SCTP_BUF_LEN(chk->data) = chk->send_size;
		fwdtsn++;
		/*-
		 * Move pointer to after the fwdtsn and transfer to the
		 * strseq pointer.
		 */
		strseq = (struct sctp_strseq *)fwdtsn;
		/*-
		 * Now populate the strseq list. This is done blindly
		 * without pulling out duplicate stream info. This is
		 * inefficient but won't harm the process since the peer will
		 * look at these in sequence and will thus release anything.
		 * It could mean we exceed the PMTU and chop off some that
		 * we could have included.. but this is unlikely (aka 1432/4
		 * would mean 300+ stream seq's would have to be reported in
		 * one FWD-TSN. With a bit of work we can later FIX this to
		 * optimize and pull out duplicates.. but it does add more
		 * overhead. So for now... not!
		 */
		at = TAILQ_FIRST(&asoc->sent_queue);
		for (i = 0; i < cnt_of_skipped; i++) {
			tp1 = TAILQ_NEXT(at, sctp_next);
			if (tp1 == NULL)
				break;
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
				i--;
				at = tp1;
				continue;
			}
			if (at->rec.data.TSN_seq == advance_peer_ack_point) {
				at->rec.data.fwd_tsn_cnt = 0;
			}
			strseq->stream = ntohs(at->rec.data.stream_number);
			strseq->sequence = ntohs(at->rec.data.stream_seq);
			strseq++;
			at = tp1;
		}
	}
	return;
}
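/*
 * Editor's sketch of the FORWARD-TSN chunk built above, following RFC 3758
 * (illustrative, not part of the original source):
 *
 *   +--------+--------+-----------------+
 *   |  type  | flags  |     length      |   type = 192 (SCTP_FORWARD_CUM_TSN)
 *   +--------+--------+-----------------+
 *   |        new cumulative TSN         |
 *   +-----------------+-----------------+
 *   |    stream #1    |  stream seq #1  |   one sctp_strseq (4 bytes) per
 *   +-----------------+-----------------+   skipped, ordered message
 *   |       ...       |       ...       |
 */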
void
sctp_send_sack(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	/*-
	 * Queue up a SACK or NR-SACK in the control queue.
	 * We must first check to see if a SACK or NR-SACK is
	 * already on the control queue.
	 * If so, we will take and remove the old one.
	 */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *a_chk;
	struct sctp_sack_chunk *sack;
	struct sctp_nr_sack_chunk *nr_sack;
	struct sctp_gap_ack_block *gap_descriptor;
	struct sack_track *selector;
	int mergeable = 0;
	int offset;
	caddr_t limit;
	uint32_t *dup;
	int limit_reached = 0;
	unsigned int i, siz, j;
	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
	int num_dups = 0;
	int space_req;
	uint32_t highest_tsn;
	uint8_t flags;
	uint8_t type;
	uint8_t tsn_map;

	if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
	    (stcb->asoc.peer_supports_nr_sack == 1)) {
		type = SCTP_NR_SELECTIVE_ACK;
	} else {
		type = SCTP_SELECTIVE_ACK;
	}
	a_chk = NULL;
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->last_data_chunk_from == NULL) {
		/* Hmm we never received anything */
		return;
	}
	sctp_slide_mapping_arrays(stcb);
	sctp_set_rwnd(stcb, asoc);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == type) {
			/* Hmm, found a sack already on queue, remove it */
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			asoc->ctrl_queue_cnt--;
			a_chk = chk;
			if (a_chk->data) {
				sctp_m_freem(a_chk->data);
				a_chk->data = NULL;
			}
			if (a_chk->whoTo) {
				sctp_free_remote_addr(a_chk->whoTo);
				a_chk->whoTo = NULL;
			}
			break;
		}
	}
	if (a_chk == NULL) {
		sctp_alloc_a_chunk(stcb, a_chk);
		if (a_chk == NULL) {
			/* No memory so we drop the idea, and set a timer */
			if (stcb->asoc.delayed_ack) {
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				stcb->asoc.send_sack = 1;
			}
			return;
		}
	}
	a_chk->copy_by_ref = 0;
	a_chk->rec.chunk_id.id = type;
	a_chk->rec.chunk_id.can_take_data = 1;
	/* Clear our pkt counts */
	asoc->data_pkts_seen = 0;

	a_chk->asoc = asoc;
	a_chk->snd_count = 0;
	a_chk->send_size = 0;	/* fill in later */
	a_chk->sent = SCTP_DATAGRAM_UNSENT;
	a_chk->whoTo = NULL;

	if ((asoc->numduptsns) ||
	    (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
		/*-
		 * Ok, we have some duplicates or the destination for the
		 * sack is unreachable, lets see if we can select an
		 * alternate than asoc->last_data_chunk_from
		 */
		if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
		    (asoc->used_alt_onsack > asoc->numnets)) {
			/* We used an alt last time, don't this time */
			a_chk->whoTo = NULL;
		} else {
			asoc->used_alt_onsack++;
			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
		}
		if (a_chk->whoTo == NULL) {
			/* Nope, no alternate */
			a_chk->whoTo = asoc->last_data_chunk_from;
			asoc->used_alt_onsack = 0;
		}
	} else {
		/*-
		 * No duplicates so we use the last place we received data
		 * from.
		 */
		asoc->used_alt_onsack = 0;
		a_chk->whoTo = asoc->last_data_chunk_from;
	}
	if (a_chk->whoTo) {
		atomic_add_int(&a_chk->whoTo->ref_count, 1);
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
		highest_tsn = asoc->highest_tsn_inside_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	}
	if (highest_tsn == asoc->cumulative_tsn) {
		/* no gaps */
		if (type == SCTP_SELECTIVE_ACK) {
			space_req = sizeof(struct sctp_sack_chunk);
		} else {
			space_req = sizeof(struct sctp_nr_sack_chunk);
		}
	} else {
		/* gaps get a cluster */
		space_req = MCLBYTES;
	}
	/* Ok now lets formulate a MBUF with our sack */
	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
	if ((a_chk->data == NULL) ||
	    (a_chk->whoTo == NULL)) {
		/* rats, no mbuf memory */
		if (a_chk->data) {
			/* was a problem with the destination */
			sctp_m_freem(a_chk->data);
			a_chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, a_chk, so_locked);
		/* sa_ignore NO_NULL_CHK */
		if (stcb->asoc.delayed_ack) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
		} else {
			stcb->asoc.send_sack = 1;
		}
		return;
	}
	/* ok, lets go through and fill it in */
	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
	space = M_TRAILINGSPACE(a_chk->data);
	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
	}
	limit = mtod(a_chk->data, caddr_t);
	limit += space;

	flags = 0;

	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		/*-
		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
		 * received, then set high bit to 1, else 0. Reset
		 * pkts_rcvd.
		 */
		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
		asoc->cmt_dac_pkts_rcvd = 0;
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
	stcb->asoc.cumack_log_atsnt++;
	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_atsnt = 0;
	}
#endif
	/* reset the reader's interpretation */
	stcb->freed_by_sorcv_sincelast = 0;

	if (type == SCTP_SELECTIVE_ACK) {
		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
		nr_sack = NULL;
		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
		if (highest_tsn > asoc->mapping_array_base_tsn) {
			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
		}
	} else {
		sack = NULL;
		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
		}
	}

	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
		offset = 1;
	} else {
		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
	}
	if (((type == SCTP_SELECTIVE_ACK) &&
	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
	    ((type == SCTP_NR_SELECTIVE_ACK) &&
	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
		/* we have a gap .. maybe */
		for (i = 0; i < siz; i++) {
			tsn_map = asoc->mapping_array[i];
			if (type == SCTP_SELECTIVE_ACK) {
				tsn_map |= asoc->nr_mapping_array[i];
			}
			if (i == 0) {
				/*
				 * Clear all bits corresponding to TSNs
				 * smaller or equal to the cumulative TSN.
				 */
				tsn_map &= (~0 << (1 - offset));
			}
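			/*
			 * Editor's note (illustrative): offset is the
			 * distance from the cumulative TSN to the mapping
			 * array base.  If the base is one past the cum-ack,
			 * offset is 1 and (~0 << 0) keeps every bit of the
			 * first byte; if the base equals the cum-ack, offset
			 * is 0 and (~0 << 1) clears bit 0, the bit of the
			 * cum-acked TSN itself, before the byte indexes
			 * sack_array.
			 */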
			selector = &sack_array[tsn_map];
			if (mergeable && selector->right_edge) {
				/*
				 * Backup, left and right edges were ok to
				 * merge.
				 */
				num_gap_blocks--;
				gap_descriptor--;
			}
			if (selector->num_entries == 0)
				mergeable = 0;
			else {
				for (j = 0; j < selector->num_entries; j++) {
					if (mergeable && selector->right_edge) {
						/*
						 * do a merge by NOT setting
						 * the left side
						 */
						;
					} else {
						/*
						 * no merge, set the left
						 * side
						 */
						gap_descriptor->start = htons((selector->gaps[j].start + offset));
					}
					gap_descriptor->end = htons((selector->gaps[j].end + offset));
					num_gap_blocks++;
					gap_descriptor++;
					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
						/* no more room */
						limit_reached = 1;
						break;
					}
				}
				if (selector->left_edge) {
					mergeable = 1;
				}
			}
			if (limit_reached) {
				/* Reached the limit stop */
				break;
			}
			offset += 8;
		}
	}
	if ((type == SCTP_NR_SELECTIVE_ACK) &&
	    (limit_reached == 0)) {
		mergeable = 0;

		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
		}

		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
			offset = 1;
		} else {
			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
		}
		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
			/* we have a gap .. maybe */
			for (i = 0; i < siz; i++) {
				tsn_map = asoc->nr_mapping_array[i];
				if (i == 0) {
					/*
					 * Clear all bits corresponding to
					 * TSNs smaller or equal to the
					 * cumulative TSN.
					 */
					tsn_map &= (~0 << (1 - offset));
				}
				selector = &sack_array[tsn_map];
				if (mergeable && selector->right_edge) {
					/*
					 * Backup, left and right edges were
					 * ok to merge.
					 */
					num_nr_gap_blocks--;
					gap_descriptor--;
				}
				if (selector->num_entries == 0)
					mergeable = 0;
				else {
					for (j = 0; j < selector->num_entries; j++) {
						if (mergeable && selector->right_edge) {
							/*
							 * do a merge by NOT
							 * setting the left
							 * side
							 */
							;
						} else {
							/*
							 * no merge, set the
							 * left side
							 */
							gap_descriptor->start = htons((selector->gaps[j].start + offset));
						}
						gap_descriptor->end = htons((selector->gaps[j].end + offset));
						num_nr_gap_blocks++;
						gap_descriptor++;
						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
							/* no more room */
							limit_reached = 1;
							break;
						}
					}
					if (selector->left_edge) {
						mergeable = 1;
					}
				}
				if (limit_reached) {
					/* Reached the limit stop */
					break;
				}
				offset += 8;
			}
		}
	}
	/* now we must add any dups we are going to report. */
	if ((limit_reached == 0) && (asoc->numduptsns)) {
		dup = (uint32_t *)gap_descriptor;
		for (i = 0; i < asoc->numduptsns; i++) {
			*dup = htonl(asoc->dup_tsns[i]);
			dup++;
			num_dups++;
			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
				/* no more room */
				break;
			}
		}
		asoc->numduptsns = 0;
	}
	/*-
	 * now that the chunk is prepared queue it to the control chunk
	 * queue.
	 */
	if (type == SCTP_SELECTIVE_ACK) {
		a_chk->send_size = sizeof(struct sctp_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t);
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
		sack->sack.num_dup_tsns = htons(num_dups);
		sack->ch.chunk_type = type;
		sack->ch.chunk_flags = flags;
		sack->ch.chunk_length = htons(a_chk->send_size);
	} else {
		a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t);
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
		nr_sack->nr_sack.reserved = 0;
		nr_sack->ch.chunk_type = type;
		nr_sack->ch.chunk_flags = flags;
		nr_sack->ch.chunk_length = htons(a_chk->send_size);
	}
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
	asoc->my_last_reported_rwnd = asoc->my_rwnd;
	asoc->ctrl_queue_cnt++;
	asoc->send_sack = 0;
	SCTP_STAT_INCR(sctps_sendsacks);
	return;
}
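/*
 * Editor's example of the gap encoding above (illustrative, not from the
 * original source): with cumulative TSN 100 and TSNs 103-105 received, the
 * SACK carries one gap ack block with start = 3 and end = 5; the peer adds
 * these offsets back to the cumulative TSN to recover 103-105 (RFC 4960,
 * Section 3.3.4).
 */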
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct mbuf *m_abort, *m, *m_last;
	struct mbuf *m_out, *m_end = NULL;
	struct sctp_abort_chunk *abort;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_nets *net;
	uint32_t vtag;
	uint32_t auth_offset = 0;
	uint16_t cause_len, chunk_len, padding_len;

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	} else {
		m_out = NULL;
	}
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		if (m_out) {
			sctp_m_freem(m_out);
		}
		if (operr) {
			sctp_m_freem(operr);
		}
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	cause_len = 0;
	m_last = NULL;
	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
		cause_len += (uint16_t)SCTP_BUF_LEN(m);
		if (SCTP_BUF_NEXT(m) == NULL) {
			m_last = m;
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}
	if (stcb->asoc.alternate) {
		net = stcb->asoc.alternate;
	} else {
		net = stcb->asoc.primary_destination;
	}
	/* Fill in the ABORT chunk header. */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	if (stcb->asoc.peer_vtag == 0) {
		/* This happens iff the assoc is in COOKIE-WAIT state. */
		vtag = stcb->asoc.my_vtag;
		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		vtag = stcb->asoc.peer_vtag;
		abort->ch.chunk_flags = 0;
	}
	abort->ch.chunk_length = htons(chunk_len);
	/* Add padding, if necessary. */
	if (padding_len > 0) {
		if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
			sctp_m_freem(m_out);
			return;
		}
	}
	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
	    stcb->asoc.primary_destination->port, NULL,
#if defined(__FreeBSD__)
	    0, 0,
#endif
	    so_locked);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
void
sctp_send_shutdown_complete(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int reflect_vtag)
{
	/* formulate and SEND a SHUTDOWN-COMPLETE */
	struct mbuf *m_shutdown_comp;
	struct sctp_shutdown_complete_chunk *shutdown_complete;
	uint32_t vtag;
	uint8_t flags;

	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_shutdown_comp == NULL) {
		/* no mbuf's */
		return;
	}
	if (reflect_vtag) {
		flags = SCTP_HAD_NO_TCB;
		vtag = stcb->asoc.my_vtag;
	} else {
		flags = 0;
		vtag = stcb->asoc.peer_vtag;
	}
	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	shutdown_complete->ch.chunk_flags = flags;
	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport,
	    htonl(vtag),
	    net->port, NULL,
#if defined(__FreeBSD__)
	    0, 0,
#endif
	    SCTP_SO_NOT_LOCKED);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}
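/*
 * Editor's note (illustrative): when reflect_vtag is set, the chunk goes out
 * with SCTP_HAD_NO_TCB (the T bit) and our own verification tag, telling the
 * peer the tag was reflected because no TCB held the peer's tag; otherwise
 * the packet carries the peer's verification tag as usual.
 */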
#if defined(__FreeBSD__)
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag,
    uint8_t type, struct mbuf *cause,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
#else
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag,
    uint8_t type, struct mbuf *cause,
    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
#endif
{
#ifdef __Panda__
	pakhandle_type o_pak;
#else
	struct mbuf *o_pak;
#endif
	struct mbuf *mout;
	struct sctphdr *shout;
	struct sctp_chunkhdr *ch;
	struct udphdr *udp;
	int len, cause_len, padding_len;
#if defined(INET) || defined(INET6)
	int ret;
#endif
#ifdef INET
#if defined(__APPLE__) || defined(__Panda__)
	sctp_route_t ro;
#endif
	struct sockaddr_in *src_sin, *dst_sin;
	struct ip *ip;
#endif
#ifdef INET6
	struct sockaddr_in6 *src_sin6, *dst_sin6;
	struct ip6_hdr *ip6;
#endif

	/* Compute the length of the cause and add final padding. */
	cause_len = 0;
	if (cause != NULL) {
		struct mbuf *m_at, *m_last = NULL;

		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL)
				m_last = m_at;
			cause_len += SCTP_BUF_LEN(m_at);
		}
		padding_len = cause_len % 4;
		if (padding_len != 0) {
			padding_len = 4 - padding_len;
		}
		if (padding_len != 0) {
			if (sctp_add_pad_tombuf(m_last, padding_len)) {
				sctp_m_freem(cause);
				return;
			}
		}
	} else {
		padding_len = 0;
	}
	/* Get an mbuf for the header. */
	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		len += sizeof(struct ip);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		len += sizeof(struct ip6_hdr);
		break;
#endif
	default:
		break;
	}
	if (port) {
		len += sizeof(struct udphdr);
	}
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#else
	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
#endif
#else
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#endif
	if (mout == NULL) {
		if (cause) {
			sctp_m_freem(cause);
		}
		return;
	}
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#else
	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
#endif
#else
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#endif
	SCTP_BUF_LEN(mout) = len;
	SCTP_BUF_NEXT(mout) = cause;
#if defined(__FreeBSD__)
	if (use_mflowid != 0) {
		mout->m_pkthdr.flowid = mflowid;
		mout->m_flags |= M_FLOWID;
	}
#endif
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(mout, SCTP_MBUF_IALLOC);
	}
#endif
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		src_sin = (struct sockaddr_in *)src;
		dst_sin = (struct sockaddr_in *)dst;
		ip = mtod(mout, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		ip->ip_tos = 0;
#if defined(__FreeBSD__)
		ip->ip_id = ip_newid();
#elif defined(__APPLE__)
#if RANDOM_IP_ID
		ip->ip_id = ip_randomid();
#else
		ip->ip_id = htons(ip_id++);
#endif
#else
		ip->ip_id = htons(ip_id++);
#endif
		ip->ip_off = 0;
		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
		ip->ip_sum = 0;
		len = sizeof(struct ip);
		shout = (struct sctphdr *)((caddr_t)ip + len);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		src_sin6 = (struct sockaddr_in6 *)src;
		dst_sin6 = (struct sockaddr_in6 *)dst;
		ip6 = mtod(mout, struct ip6_hdr *);
		ip6->ip6_flow = htonl(0x60000000);
#if defined(__FreeBSD__)
		if (V_ip6_auto_flowlabel) {
			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
#endif
#if defined(__Userspace__)
		ip6->ip6_hlim = IPv6_HOP_LIMIT;
#else
		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
#endif
		if (port) {
			ip6->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6->ip6_nxt = IPPROTO_SCTP;
		}
		ip6->ip6_src = dst_sin6->sin6_addr;
		ip6->ip6_dst = src_sin6->sin6_addr;
		len = sizeof(struct ip6_hdr);
		shout = (struct sctphdr *)((caddr_t)ip6 + len);
		break;
#endif
	default:
		len = 0;
		shout = mtod(mout, struct sctphdr *);
		break;
	}
	if (port) {
		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
			sctp_m_freem(mout);
			return;
		}
		udp = (struct udphdr *)shout;
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		udp->uh_sum = 0;
		udp->uh_ulen = htons(sizeof(struct udphdr) +
		    sizeof(struct sctphdr) +
		    sizeof(struct sctp_chunkhdr) +
		    cause_len + padding_len);
		len += sizeof(struct udphdr);
		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
	} else {
		udp = NULL;
	}
	shout->src_port = sh->dest_port;
	shout->dest_port = sh->src_port;
	shout->checksum = 0;
	if (vtag) {
		shout->v_tag = htonl(vtag);
	} else {
		shout->v_tag = sh->v_tag;
	}
	len += sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
	ch->chunk_type = type;
	if (vtag) {
		ch->chunk_flags = 0;
	} else {
		ch->chunk_flags = SCTP_HAD_NO_TCB;
	}
	ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
	len += sizeof(struct sctp_chunkhdr);
	len += cause_len + padding_len;

	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		sctp_m_freem(mout);
		return;
	}
	SCTP_ATTACH_CHAIN(o_pak, mout, len);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#if defined(__APPLE__) || defined(__Panda__)
		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(sctp_route_t));
#if defined(__Panda__)
		ro._l_addr.sa.sa_family = AF_INET;
#endif
#endif
		if (port) {
#if !defined(__Windows__) && !defined(__Userspace__)
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
			if (V_udp_cksum) {
				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			} else {
				udp->uh_sum = 0;
			}
#else
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
#endif
#else
			udp->uh_sum = 0;
#endif
		}
#if defined(__FreeBSD__)
#if __FreeBSD_version >= 1000000
		ip->ip_len = htons(len);
#else
		ip->ip_len = len;
#endif
#elif defined(__APPLE__) || defined(__Userspace__)
		ip->ip_len = len;
#else
		ip->ip_len = htons(len);
#endif
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
			if (V_udp_cksum) {
				SCTP_ENABLE_UDP_CSUM(o_pak);
			}
#else
			SCTP_ENABLE_UDP_CSUM(o_pak);
#endif
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
#if defined(__APPLE__) || defined(__Panda__)
		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
		/* Free the route if we got one back */
		if (ro.ro_rt) {
			RTFREE(ro.ro_rt);
			ro.ro_rt = NULL;
		}
#else
		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ip6->ip6_plen = len - sizeof(struct ip6_hdr);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__Windows__)
			udp->uh_sum = 0;
#elif !defined(__Userspace__)
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
#endif
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 900000
#if __FreeBSD_version > 901000
			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
#else
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
#endif
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		char *buffer;
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)src;
#if defined(SCTP_WITH_NO_CSUM)
		SCTP_STAT_INCR(sctps_sendnocrc);
#else
		shout->checksum = sctp_calculate_cksum(mout, 0);
		SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(mout);
		}
#endif
		/* Don't alloc/free for each packet */
		if ((buffer = malloc(len)) != NULL) {
			m_copydata(mout, 0, len, buffer);
			SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
			free(buffer);
		}
		sctp_m_freem(mout);
		break;
	}
#endif
	default:
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		    dst->sa_family);
		sctp_m_freem(mout);
		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return;
	}
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}
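/*
 * Editor's sketch of the packet sctp_send_resp_msg() assembles (illustrative,
 * not from the original source):
 *
 *   [ IPv4 or IPv6 header ]
 *   [ UDP header          ]   only when UDP encapsulation (port != 0) is used
 *   [ SCTP common header  ]   ports swapped from the incoming packet
 *   [ chunk header        ]   ABORT, OPERATION-ERROR or SHUTDOWN-COMPLETE
 *   [ causes + padding    ]   optional error causes, padded to 4 bytes
 */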
void
sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh,
#if defined(__FreeBSD__)
    uint8_t use_mflowid, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
#if defined(__FreeBSD__)
	    use_mflowid, mflowid,
#endif
	    vrf_id, port);
}
void
sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (net == NULL) {
		return;
	}
	(void)SCTP_GETTIME_TIMEVAL(&now);
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		break;
#endif
#ifdef INET6
	case AF_INET6:
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		break;
#endif
	default:
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
		return;
	}

	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
#ifdef HAVE_SA_LEN
	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
#else
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
		break;
#endif
	default:
		hb->heartbeat.hb_info.addr_len = 0;
		break;
	}
#endif
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(hb->heartbeat.hb_info.address,
		    &net->ro._l_addr.sin.sin_addr,
		    sizeof(net->ro._l_addr.sin.sin_addr));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		memcpy(hb->heartbeat.hb_info.address,
		    &net->ro._l_addr.sin6.sin6_addr,
		    sizeof(net->ro._l_addr.sin6.sin6_addr));
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(hb->heartbeat.hb_info.address,
		    &net->ro._l_addr.sconn.sconn_addr,
		    sizeof(net->ro._l_addr.sconn.sconn_addr));
		break;
#endif
	default:
		return;
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	return;
}
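/*
 * Editor's note (illustrative): time_value_1/time_value_2 carry the send
 * time inside the HEARTBEAT-INFO parameter; the peer echoes the parameter
 * back in its HEARTBEAT-ACK, which lets the sender compute an RTT sample for
 * the probed path without keeping per-heartbeat state.
 */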
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct sctp_tmit_chunk *chk;

	if (net == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
			/* found a previous ECN_ECHO update it if needed */
			uint32_t cnt, ctsn;

			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ctsn = ntohl(ecne->tsn);
			if (SCTP_TSN_GT(high_tsn, ctsn)) {
				ecne->tsn = htonl(high_tsn);
				SCTP_STAT_INCR(sctps_queue_upd_ecne);
			}
			cnt = ntohl(ecne->num_pkts_since_cwr);
			cnt++;
			ecne->num_pkts_since_cwr = htonl(cnt);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	SCTP_STAT_INCR(sctps_queue_upd_ecne);
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	stcb->asoc.ecn_echo_cnt_onq++;
	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
	ecne->ch.chunk_type = SCTP_ECN_ECHO;
	ecne->ch.chunk_flags = 0;
	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
	ecne->tsn = htonl(high_tsn);
	ecne->num_pkts_since_cwr = htonl(1);
	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct mbuf *m, int len, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
	struct sctp_pktdrop_chunk *drp;
	struct sctp_tmit_chunk *chk;
	uint8_t *datap;
	int was_trunc = 0;
	int fullsz = 0;
	long spc;
	int offset;
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	if (!stcb) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->peer_supports_pktdrop == 0) {
		/*-
		 * peer must declare support before I send one.
		 */
		return;
	}
	if (stcb->sctp_socket == NULL) {
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	len -= iphlen;
	chk->send_size = len;
	/* Validate that we do not have an ABORT in here. */
	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_PACKET_DROPPED:
		case SCTP_ABORT_ASSOCIATION:
		case SCTP_INITIATION_ACK:
			/*-
			 * We don't respond with a PKT-DROP to an ABORT
			 * or PKT-DROP. We also do not respond to an
			 * INIT-ACK, because we can't know if the initiation
			 * tag is correct or not.
			 */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}

	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
		/*-
		 * only send 1 mtu worth, trim off the
		 * excess on the end.
		 */
		fullsz = len;
		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
		was_trunc = 1;
	}
	chk->asoc = &stcb->asoc;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
jump_out:
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
	if (drp == NULL) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
		goto jump_out;
	}
	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
	chk->book_size_scale = 0;
	if (was_trunc) {
		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
		drp->trunc_len = htons(fullsz);
		/*-
		 * Len is already adjusted to size minus overhead above;
		 * take out the pkt_drop chunk itself from it.
		 */
		chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
		len = chk->send_size;
	} else {
		/* no truncation needed */
		drp->ch.chunk_flags = 0;
		drp->trunc_len = htons(0);
	}
	if (bad_crc) {
		drp->ch.chunk_flags |= SCTP_BADCRC;
	}
	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (net) {
		/* we should hit here */
		chk->whoTo = net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else {
		chk->whoTo = NULL;
	}
	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
	chk->rec.chunk_id.can_take_data = 1;
	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
	drp->ch.chunk_length = htons(chk->send_size);
	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
	if (spc < 0) {
		spc = 0;
	}
	drp->bottle_bw = htonl(spc);
	if (asoc->my_rwnd) {
		drp->current_onq = htonl(asoc->size_on_reasm_queue +
		    asoc->size_on_all_streams +
		    asoc->my_rwnd_control_len +
		    stcb->sctp_socket->so_rcv.sb_cc);
	} else {
		/*-
		 * If my rwnd is 0, possibly from mbuf depletion as well as
		 * space used, tell the peer there is NO space aka onq == bw
		 */
		drp->current_onq = htonl(spc);
	}
	drp->reserved = 0;
	datap = drp->data;
	m_copydata(m, iphlen, len, (caddr_t)datap);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}
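/*
 * Editor's illustration of the truncation above (not from the original
 * source): with a smallest MTU of 1500, at most min(1500, MCLBYTES) -
 * SCTP_MAX_OVERHEAD bytes of the dropped packet are echoed back; the
 * original length is preserved for the peer in trunc_len and the
 * SCTP_PACKET_TRUNCATED flag is set.
 */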
void
sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
{
	struct sctp_association *asoc;
	struct sctp_cwr_chunk *cwr;
	struct sctp_tmit_chunk *chk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (net == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
			/* found a previous CWR queued to same destination, update it if needed */
			uint32_t ctsn;

			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
			ctsn = ntohl(cwr->tsn);
			if (SCTP_TSN_GT(high_tsn, ctsn)) {
				cwr->tsn = htonl(high_tsn);
			}
			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
				/* Make sure override is carried */
				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
			}
			return;
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_CWR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_cwr_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
	cwr->ch.chunk_type = SCTP_ECN_CWR;
	cwr->ch.chunk_flags = override;
	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
	cwr->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t *list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	uint16_t len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
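/*
 * Editor's example for the padding logic above (illustrative, not from the
 * original source): the fixed part of the request is a multiple of 4 bytes
 * and each stream id is 2 bytes, so an odd number of entries leaves the
 * parameter 2 bytes short of 4-byte alignment; writing one extra zero entry
 * supplies the pad without a separate padding step.
 */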
void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t *list,
    uint32_t seq)
{
	uint16_t len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
	req_in->ph.param_length = htons(len);
	req_in->request_seq = htonl(seq);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_in->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_in->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
void
sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
    uint32_t seq)
{
	uint16_t len, old_len;
	struct sctp_stream_reset_tsn_request *req_tsn;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_tsn_request);
	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
	req_tsn->ph.param_length = htons(len);
	req_tsn->request_seq = htonl(seq);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}
void
sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
    uint32_t resp_seq, uint32_t result)
{
	uint16_t len, old_len;
	struct sctp_stream_reset_response *resp;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_response);
	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
	resp->ph.param_length = htons(len);
	resp->response_seq = htonl(resp_seq);
	resp->result = htonl(result);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
void
sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
    uint32_t resp_seq, uint32_t result,
    uint32_t send_una, uint32_t recv_next)
{
	uint16_t len, old_len;
	struct sctp_stream_reset_response_tsn *resp;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_response_tsn);
	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
	resp->ph.param_length = htons(len);
	resp->response_seq = htonl(resp_seq);
	resp->result = htonl(result);
	resp->senders_next_tsn = htonl(send_una);
	resp->receivers_next_tsn = htonl(recv_next);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
void
sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
    uint32_t seq,
    uint16_t adding)
{
	uint16_t len, old_len;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_add_strm *addstr;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_add_strm);

	/* Fill it out. */
	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
	addstr->ph.param_length = htons(len);
	addstr->request_seq = htonl(seq);
	addstr->number_of_streams = htons(adding);
	addstr->reserved = 0;

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}
void
sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
    uint32_t seq,
    uint16_t adding)
{
	uint16_t len, old_len;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_add_strm *addstr;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_add_strm);
	/* Fill it out. */
	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
	addstr->ph.param_length = htons(len);
	addstr->request_seq = htonl(seq);
	addstr->number_of_streams = htons(adding);
	addstr->reserved = 0;

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}
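/*
 * Editor's note (illustrative): each sctp_add_* helper above appends one
 * parameter to the same STREAM RESET chunk; old_len is the running 4-byte
 * aligned chunk length, so several requests (out, in, add-stream, tsn) can
 * be stacked into a single chunk before it is queued.
 */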
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    int number_entries, uint16_t *list,
    uint8_t send_out_req,
    uint8_t send_in_req,
    uint8_t send_tsn_req,
    uint8_t add_stream,
    uint16_t adding_o,
    uint16_t adding_i, uint8_t peer_asked)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*-
		 * Already one pending, must get ACK back to clear the flag.
		 */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
	    (add_stream == 0)) {
		/* nothing to do */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (stcb->asoc.alternate) {
		chk->whoTo = stcb->asoc.alternate;
	} else {
		chk->whoTo = stcb->asoc.primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if ((add_stream & 1) &&
	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
		/* Need to allocate more */
		struct sctp_stream_out *oldstream;
		struct sctp_stream_queue_pending *sp, *nsp;
		int i;

		oldstream = stcb->asoc.strmout;
		/* get some more */
		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
		    ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
		    SCTP_M_STRMO);
		if (stcb->asoc.strmout == NULL) {
			uint8_t x;

			stcb->asoc.strmout = oldstream;
			/* Turn off the bit */
			x = add_stream & 0xfe;
			add_stream = x;
			goto skip_stuff;
		}
		/*-
		 * Ok now we proceed with copying the old out stuff and
		 * initializing the new stuff.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
			stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
			/* now anything on those queues? */
			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
			}
			/* Now move assoc pointers too */
			if (stcb->asoc.last_out_stream == &oldstream[i]) {
				stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
			}
			if (stcb->asoc.locked_on_sending == &oldstream[i]) {
				stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
			}
		}
		/* now the new streams */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = 0;
			stcb->asoc.strmout[i].next_sequence_send = 0x0;
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
		}
		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
		SCTP_FREE(oldstream, SCTP_M_STRMO);
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
skip_stuff:
	if ((add_stream & 1) && (adding_o > 0)) {
		asoc->strm_pending_add_size = adding_o;
		asoc->peer_req_out = peer_asked;
		sctp_add_an_out_stream(chk, seq, adding_o);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if ((add_stream & 2) && (adding_i > 0)) {
		sctp_add_an_in_stream(chk, seq, adding_i);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	asoc->str_reset = chk;
	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
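/*
 * Editor's usage sketch (hypothetical call, not from the original source):
 * to request an outgoing reset of streams 0 and 1, pass the stream list with
 * send_out_req set and the remaining request flags zero:
 *
 *	uint16_t streams[2] = { 0, 1 };
 *	(void)sctp_send_str_reset_req(stcb, 2, streams, 1, 0, 0, 0, 0, 0, 0);
 *
 * On success the request is queued on the control queue and the STRRESET
 * timer is armed; EBUSY is returned while a previous request is outstanding.
 */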
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
#if defined(__FreeBSD__)
    uint8_t use_mflowid, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	/* Don't respond to an ABORT with an ABORT. */
	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
		if (cause)
			sctp_m_freem(cause);
		return;
	}
	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
#if defined(__FreeBSD__)
	    use_mflowid, mflowid,
#endif
	    vrf_id, port);
	return;
}
void
sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
#if defined(__FreeBSD__)
    uint8_t use_mflowid, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
#if defined(__FreeBSD__)
	    use_mflowid, mflowid,
#endif
	    vrf_id, port);
	return;
}
static struct mbuf *
sctp_copy_resume(struct uio *uio,
    int max_send_len,
#if defined(__FreeBSD__) && __FreeBSD_version > 602000
    int user_marks_eor,
#endif
    int *error,
    uint32_t *sndout,
    struct mbuf **new_tail)
{
#if defined(__Panda__)
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
	    (user_marks_eor ? M_EOR : 0));
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		*error = ENOMEM;
	} else {
		*sndout = m_length(m, NULL);
		*new_tail = m_last(m);
	}
	return (m);
#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
	    (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		*error = ENOMEM;
	} else {
		*sndout = m_length(m, NULL);
		*new_tail = m_last(m);
	}
	return (m);
#else
	int left, cancpy, willcpy;
	struct mbuf *m, *head;

#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	left = min(uio->uio_resid, max_send_len);
#else
	left = min(uio_resid(uio), max_send_len);
#endif
#else
	left = min(uio->uio_resid, max_send_len);
#endif
	/* Always get a header just in case */
	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
	if (head == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		*error = ENOMEM;
		return (NULL);
	}
	cancpy = M_TRAILINGSPACE(head);
	willcpy = min(cancpy, left);
	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
	if (*error) {
		sctp_m_freem(head);
		return (NULL);
	}
	*sndout += willcpy;
	left -= willcpy;
	SCTP_BUF_LEN(head) = willcpy;
	m = head;
	*new_tail = head;
	while (left > 0) {
		/* move in user data */
		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
		if (SCTP_BUF_NEXT(m) == NULL) {
			sctp_m_freem(head);
			*new_tail = NULL;
			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			*error = ENOMEM;
			return (NULL);
		}
		m = SCTP_BUF_NEXT(m);
		cancpy = M_TRAILINGSPACE(m);
		willcpy = min(cancpy, left);
		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (*error) {
			sctp_m_freem(head);
			*new_tail = NULL;
			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
			*error = EFAULT;
			return (NULL);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		*sndout += willcpy;
		*new_tail = m;
		if (left == 0) {
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	return (head);
#endif
}
static int
sctp_copy_one(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    int resv_upfront)
{
	int left;

#if defined(__Panda__)
	left = sp->length;
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
	    resv_upfront, 0);
	if (sp->data == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}

	sp->tail_mbuf = m_last(sp->data);
	return (0);

#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
	left = sp->length;
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
	    resv_upfront, 0);
	if (sp->data == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}

	sp->tail_mbuf = m_last(sp->data);
	return (0);
#else
	int cancpy, willcpy, error;
	struct mbuf *m, *head;

	left = sp->length;
	/* First one gets a header */
	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	/*-
	 * Add this one for m in now, that way if the alloc fails we won't
	 * have a bad cnt.
	 */
	SCTP_BUF_RESV_UF(m, resv_upfront);
	cancpy = M_TRAILINGSPACE(m);
	willcpy = min(cancpy, left);
	while (left > 0) {
		/* move in user data */
		error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (error) {
			sctp_m_freem(head);
			return (error);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		if (left > 0) {
			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * the head goes back to caller, he can free
				 * the rest
				 */
				sctp_m_freem(head);
				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
				return (ENOMEM);
			}
			m = SCTP_BUF_NEXT(m);
			cancpy = M_TRAILINGSPACE(m);
			willcpy = min(cancpy, left);
		}
	}
	SCTP_BUF_NEXT(m) = NULL;
	sp->data = head;
	sp->tail_mbuf = m;
	return (0);
#endif
}
12689 static struct sctp_stream_queue_pending *
12690 sctp_copy_it_in(struct sctp_tcb *stcb,
12691 struct sctp_association *asoc,
12692 struct sctp_sndrcvinfo *srcv,
12694 struct sctp_nets *net,
12696 int user_marks_eor,
12701 * This routine must be very careful in its work. Protocol
12702 * processing is up and running so care must be taken to spl...()
12703 * when you need to do something that may effect the stcb/asoc. The
12704 * sb is locked however. When data is copied the protocol processing
12705 * should be enabled since this is a slower operation...
12707 struct sctp_stream_queue_pending *sp = NULL;
12711 /* Now can we send this? */
12712 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12713 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12714 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12715 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12716 /* got data while shutting down */
12717 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12718 *error = ECONNRESET;
12721 sctp_alloc_a_strmoq(stcb, sp);
12723 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12728 sp->sender_all_done = 0;
12729 sp->sinfo_flags = srcv->sinfo_flags;
12730 sp->timetolive = srcv->sinfo_timetolive;
12731 sp->ppid = srcv->sinfo_ppid;
12732 sp->context = srcv->sinfo_context;
12733 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12735 sp->stream = srcv->sinfo_stream;
12736 #if defined(__APPLE__)
12737 #if defined(APPLE_LEOPARD)
12738 sp->length = min(uio->uio_resid, max_send_len);
12740 sp->length = min(uio_resid(uio), max_send_len);
12743 sp->length = min(uio->uio_resid, max_send_len);
12745 #if defined(__APPLE__)
12746 #if defined(APPLE_LEOPARD)
12747 if ((sp->length == (uint32_t)uio->uio_resid) &&
12749 if ((sp->length == (uint32_t)uio_resid(uio)) &&
12752 if ((sp->length == (uint32_t)uio->uio_resid) &&
12754 ((user_marks_eor == 0) ||
12755 (srcv->sinfo_flags & SCTP_EOF) ||
12756 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12757 sp->msg_is_complete = 1;
12759 sp->msg_is_complete = 0;
12761 sp->sender_all_done = 0;
12762 sp->some_taken = 0;
12763 sp->put_last_out = 0;
12764 resv_in_first = sizeof(struct sctp_data_chunk);
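/*
 * resv_in_first reserves sizeof(struct sctp_data_chunk) bytes at the
 * front of the first mbuf so the DATA chunk header can later be
 * prepended in place, without another allocation or copy.
 */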
12765 sp->data = sp->tail_mbuf = NULL;
12766 if (sp->length == 0) {
12770 if (srcv->sinfo_keynumber_valid) {
12771 sp->auth_keyid = srcv->sinfo_keynumber;
12773 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12775 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12776 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12777 sp->holds_key_ref = 1;
12779 #if defined(__APPLE__)
12780 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
12782 *error = sctp_copy_one(sp, uio, resv_in_first);
12783 #if defined(__APPLE__)
12784 SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
12788 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12791 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12793 atomic_add_int(&sp->net->ref_count, 1);
12797 sctp_set_prsctp_policy(sp);
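/*
 * sctp_sosend() is the socket-layer send entry point: pull a
 * sctp_sndrcvinfo out of the control mbufs if one is present, rewrite a
 * V4-mapped IPv6 destination as plain IPv4, then hand everything to
 * sctp_lower_sosend().
 */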
12805 sctp_sosend(struct socket *so,
12806 struct sockaddr *addr,
12809 pakhandle_type top,
12810 pakhandle_type icontrol,
12813 struct mbuf *control,
12815 #if defined(__APPLE__) || defined(__Panda__)
12819 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
12821 #elif defined(__Windows__)
12824 #if defined(__Userspace__)
12826 * proc is a dummy in __Userspace__ and will not be passed
12827 * to sctp_lower_sosend
12836 struct mbuf *control = NULL;
12838 #if defined(__APPLE__)
12839 struct proc *p = current_proc();
12841 int error, use_sndinfo = 0;
12842 struct sctp_sndrcvinfo sndrcvninfo;
12843 struct sockaddr *addr_to_use;
12844 #if defined(INET) && defined(INET6)
12845 struct sockaddr_in sin;
12848 #if defined(__APPLE__)
12849 SCTP_SOCKET_LOCK(so, 1);
12852 control = SCTP_HEADER_TO_CHAIN(icontrol);
12855	/* process cmsg snd/rcv info (maybe an assoc-id) */
12856 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12857 sizeof(sndrcvninfo))) {
12862 addr_to_use = addr;
12863 #if defined(INET) && defined(INET6)
12864 if ((addr) && (addr->sa_family == AF_INET6)) {
12865 struct sockaddr_in6 *sin6;
12867 sin6 = (struct sockaddr_in6 *)addr;
12868 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12869 in6_sin6_2_sin(&sin, sin6);
12870 addr_to_use = (struct sockaddr *)&sin;
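/*
 * A V4-mapped address (::ffff:a.b.c.d) names an IPv4 peer, so the
 * embedded IPv4 address is unpacked into `sin' and used instead; the
 * lower layers then deal with a genuine AF_INET sockaddr.
 */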
12874 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12881	    use_sndinfo ? &sndrcvninfo : NULL
12882 #if !(defined(__Panda__) || defined(__Userspace__))
12886 #if defined(__APPLE__)
12887 SCTP_SOCKET_UNLOCK(so, 1);
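/*
 * sctp_lower_sosend() does the real work for every user send: find (or
 * implicitly set up) the association, validate the address and sinfo
 * flags, block until send-buffer space is available, copy the user data
 * onto the stream queue, and kick chunk output when permitted.
 */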
12894 sctp_lower_sosend(struct socket *so,
12895 struct sockaddr *addr,
12898 pakhandle_type i_pak,
12899 pakhandle_type i_control,
12901 struct mbuf *i_pak,
12902 struct mbuf *control,
12905 struct sctp_sndrcvinfo *srcv
12906 #if !(defined( __Panda__) || defined(__Userspace__))
12908 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
12910 #elif defined(__Windows__)
12918 unsigned int sndlen = 0, max_len;
12920 struct mbuf *top = NULL;
12922 struct mbuf *control = NULL;
12924 int queue_only = 0, queue_only_for_init = 0;
12925 int free_cnt_applied = 0;
12927 int now_filled = 0;
12928 unsigned int inqueue_bytes = 0;
12929 struct sctp_block_entry be;
12930 struct sctp_inpcb *inp;
12931 struct sctp_tcb *stcb = NULL;
12932 struct timeval now;
12933 struct sctp_nets *net;
12934 struct sctp_association *asoc;
12935 struct sctp_inpcb *t_inp;
12936 int user_marks_eor;
12937 int create_lock_applied = 0;
12938 int nagle_applies = 0;
12939 int some_on_control = 0;
12940 int got_all_of_the_send = 0;
12941 int hold_tcblock = 0;
12942 int non_blocking = 0;
12943 uint32_t local_add_more, local_soresv = 0;
12945 uint16_t sinfo_flags;
12946 sctp_assoc_t sinfo_assoc_id;
12953 #if defined(__APPLE__)
12954 sctp_lock_assert(so);
12956 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12958 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12961 SCTP_RELEASE_PKT(i_pak);
12965 if ((uio == NULL) && (i_pak == NULL)) {
12966 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12969 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12970 atomic_add_int(&inp->total_sends, 1);
12972 #if defined(__APPLE__)
12973 #if defined(APPLE_LEOPARD)
12974 if (uio->uio_resid < 0) {
12976 if (uio_resid(uio) < 0) {
12979 if (uio->uio_resid < 0) {
12981 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12984 #if defined(__APPLE__)
12985 #if defined(APPLE_LEOPARD)
12986 sndlen = uio->uio_resid;
12988 sndlen = uio_resid(uio);
12991 sndlen = uio->uio_resid;
12994 top = SCTP_HEADER_TO_CHAIN(i_pak);
12997	 * app len indicates the datalen; dgsize for cases
12998	 * of SCTP_EOF/ABORT will not have the right len
13000 sndlen = SCTP_APP_DATA_LEN(i_pak);
13002 * Set the particle len also to zero to match
13003 * up with app len. We only have one particle
13004 * if app len is zero for Panda. This is ensured
13005 * in the socket lib
13008 SCTP_BUF_LEN(top) = 0;
13011 * We delink the chain from header, but keep
13012	 * the header around as we will need it in the send retry
13015 SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
13017 sndlen = SCTP_HEADER_LEN(i_pak);
13020 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
13025 control = SCTP_HEADER_TO_CHAIN(i_control);
13028 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13029 (inp->sctp_socket->so_qlimit)) {
13030 /* The listener can NOT send */
13031 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13036 * Pre-screen address, if one is given the sin-len
13037 * must be set correctly!
13040 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13041 switch (raddr->sa.sa_family) {
13044 #ifdef HAVE_SIN_LEN
13045 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13046 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13051 port = raddr->sin.sin_port;
13056 #ifdef HAVE_SIN6_LEN
13057 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13058 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13063 port = raddr->sin6.sin6_port;
13066 #if defined(__Userspace__)
13068 #ifdef HAVE_SCONN_LEN
13069 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13070 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13075 port = raddr->sconn.sconn_port;
13079 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13080 error = EAFNOSUPPORT;
13087 sinfo_flags = srcv->sinfo_flags;
13088 sinfo_assoc_id = srcv->sinfo_assoc_id;
13089 if (INVALID_SINFO_FLAG(sinfo_flags) ||
13090 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13091 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13095 if (srcv->sinfo_flags)
13096 SCTP_STAT_INCR(sctps_sends_with_flags);
13098 sinfo_flags = inp->def_send.sinfo_flags;
13099 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13101 if (sinfo_flags & SCTP_SENDALL) {
13102	/* it's a sendall */
13103 error = sctp_sendall(inp, uio, top, srcv);
13107 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13108 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13112 /* now we must find the assoc */
13113 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13114 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13115 SCTP_INP_RLOCK(inp);
13116 stcb = LIST_FIRST(&inp->sctp_asoc_list);
13118 SCTP_TCB_LOCK(stcb);
13121 SCTP_INP_RUNLOCK(inp);
13122 } else if (sinfo_assoc_id) {
13123 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
13126 * Since we did not use findep we must
13127	 * increment it, and if we don't find a tcb, decrement it.
13130 SCTP_INP_WLOCK(inp);
13131 SCTP_INP_INCR_REF(inp);
13132 SCTP_INP_WUNLOCK(inp);
13133 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13134 if (stcb == NULL) {
13135 SCTP_INP_WLOCK(inp);
13136 SCTP_INP_DECR_REF(inp);
13137 SCTP_INP_WUNLOCK(inp);
13142 if ((stcb == NULL) && (addr)) {
13143 /* Possible implicit send? */
13144 SCTP_ASOC_CREATE_LOCK(inp);
13145 create_lock_applied = 1;
13146 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13147 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13148	/* Should I really unlock? */
13149 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13154 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13155 (addr->sa_family == AF_INET6)) {
13156 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13160 SCTP_INP_WLOCK(inp);
13161 SCTP_INP_INCR_REF(inp);
13162 SCTP_INP_WUNLOCK(inp);
13163 /* With the lock applied look again */
13164 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13165 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13166 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13168 if (stcb == NULL) {
13169 SCTP_INP_WLOCK(inp);
13170 SCTP_INP_DECR_REF(inp);
13171 SCTP_INP_WUNLOCK(inp);
13178 if (t_inp != inp) {
13179 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13184 if (stcb == NULL) {
13185 if (addr == NULL) {
13186 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13190 /* We must go ahead and start the INIT process */
13193 if ((sinfo_flags & SCTP_ABORT) ||
13194 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13196	 * User asks to abort a non-existent assoc,
13197	 * or EOF a non-existent assoc with no data
13199 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13203 /* get an asoc/stcb struct */
13204 vrf_id = inp->def_vrf_id;
13206 if (create_lock_applied == 0) {
13207 panic("Error, should hold create lock and I don't?");
13210 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13211 #if !(defined( __Panda__) || defined(__Userspace__))
13214 (struct proc *)NULL
13217 if (stcb == NULL) {
13218 /* Error is setup for us in the call */
13221 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13222 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13223 /* Set the connected flag so we can queue data */
13224 soisconnecting(so);
13227 if (create_lock_applied) {
13228 SCTP_ASOC_CREATE_UNLOCK(inp);
13229 create_lock_applied = 0;
13231 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13233 /* Turn on queue only flag to prevent data from being sent */
13235 asoc = &stcb->asoc;
13236 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13237 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13239 /* initialize authentication params for the assoc */
13240 sctp_initialize_auth_params(inp, stcb);
13243 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13244 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
13250 /* out with the INIT */
13251 queue_only_for_init = 1;
13253 * we may want to dig in after this call and adjust the MTU
13254 * value. It defaulted to 1500 (constant) but the ro
13255 * structure may now have an update and thus we may need to
13256 * change it BEFORE we append the message.
13260 asoc = &stcb->asoc;
13262 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13263 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
13265 net = sctp_findnet(stcb, addr);
13268 if ((net == NULL) ||
13269 ((port != 0) && (port != stcb->rport))) {
13270 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13275 if (stcb->asoc.alternate) {
13276 net = stcb->asoc.alternate;
13278 net = stcb->asoc.primary_destination;
13281 atomic_add_int(&stcb->total_sends, 1);
13282 /* Keep the stcb from being freed under our feet */
13283 atomic_add_int(&asoc->refcnt, 1);
13284 free_cnt_applied = 1;
13286 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13287 if (sndlen > asoc->smallest_mtu) {
13288 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13293 #if defined(__Userspace__)
13294 if (inp->recv_callback) {
13298 if (SCTP_SO_IS_NBIO(so)
13299 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13300 || (flags & MSG_NBIO)
13306 /* would we block? */
13307 if (non_blocking) {
13308 if (hold_tcblock == 0) {
13309 SCTP_TCB_LOCK(stcb);
13312 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
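/*
 * inqueue_bytes is the user payload still queued: the per-chunk
 * sizeof(struct sctp_data_chunk) overhead is backed out of
 * total_output_queue_size so only data bytes count against
 * SCTP_SB_LIMIT_SND(so).
 */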
13313 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13314 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13315 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13316 if (sndlen > SCTP_SB_LIMIT_SND(so))
13319 error = EWOULDBLOCK;
13322 stcb->asoc.sb_send_resv += sndlen;
13323 SCTP_TCB_UNLOCK(stcb);
13326 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13328 local_soresv = sndlen;
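/*
 * The send length is reserved in asoc.sb_send_resv up front so
 * concurrent senders cannot oversubscribe the send buffer;
 * local_soresv remembers how much to give back on the way out
 * (see the atomic_subtract_int near the end of this function).
 */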
13329 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13330 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13331 error = ECONNRESET;
13334 if (create_lock_applied) {
13335 SCTP_ASOC_CREATE_UNLOCK(inp);
13336 create_lock_applied = 0;
13338 if (asoc->stream_reset_outstanding) {
13340 * Can't queue any data while stream reset is underway.
13342 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
13346 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13347 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13350 /* we are now done with all control */
13352 sctp_m_freem(control);
13355 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
13356 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13357 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13358 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13359 if (srcv->sinfo_flags & SCTP_ABORT) {
13362 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13363 error = ECONNRESET;
13367 /* Ok, we will attempt a msgsnd :> */
13368 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
13370 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000
13371 p->td_ru.ru_msgsnd++;
13372 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
13373 p->td_proc->p_stats->p_ru.ru_msgsnd++;
13375 p->p_stats->p_ru.ru_msgsnd++;
13379 /* Are we aborting? */
13380 if (srcv->sinfo_flags & SCTP_ABORT) {
13382 int tot_demand, tot_out = 0, max_out;
13384 SCTP_STAT_INCR(sctps_sends_with_abort);
13385 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13386 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13387 /* It has to be up before we abort */
13388 /* how big is the user initiated abort? */
13389 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13393 if (hold_tcblock) {
13394 SCTP_TCB_UNLOCK(stcb);
13398 struct mbuf *cntm = NULL;
13400 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13402 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13403 tot_out += SCTP_BUF_LEN(cntm);
13407 /* Must fit in a MTU */
13409 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13410 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13412 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13416 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
13419 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13423 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13424 max_out -= sizeof(struct sctp_abort_msg);
13425 if (tot_out > max_out) {
13429 struct sctp_paramhdr *ph;
13431 /* now move forward the data pointer */
13432 ph = mtod(mm, struct sctp_paramhdr *);
13433 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13434 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
13436 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
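/*
 * The user-initiated abort cause is a plain parameter header
 * (SCTP_CAUSE_USER_INITIATED_ABT, length = header + user data) with
 * the user's data copied in right behind it; mm then rides along on
 * the ABORT chunk.
 */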
13438 #if defined(__APPLE__)
13439 SCTP_SOCKET_UNLOCK(so, 0);
13441 error = uiomove((caddr_t)ph, (int)tot_out, uio);
13442 #if defined(__APPLE__)
13443 SCTP_SOCKET_LOCK(so, 0);
13447	 * Here if we can't get the user's data we
13448	 * still abort; we just don't get to
13449	 * send the user's note :-0
13456 SCTP_BUF_NEXT(mm) = top;
13460 if (hold_tcblock == 0) {
13461 SCTP_TCB_LOCK(stcb);
13463 atomic_add_int(&stcb->asoc.refcnt, -1);
13464 free_cnt_applied = 0;
13465 /* release this lock, otherwise we hang on ourselves */
13466 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13467 /* now relock the stcb so everything is sane */
13470	/* In this case top is already chained to mm,
13471	 * so avoid a double free, since we free it below if
13472	 * top != NULL and the driver would free it after sending
13480 /* Calculate the maximum we can send */
13481 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13482 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13483 if (non_blocking) {
13484 /* we already checked for non-blocking above. */
13487 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13492 if (hold_tcblock) {
13493 SCTP_TCB_UNLOCK(stcb);
13496 /* Is the stream no. valid? */
13497 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13498 /* Invalid stream number */
13499 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13503 if (asoc->strmout == NULL) {
13504 /* huh? software error */
13505 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13510 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13511 if ((user_marks_eor == 0) &&
13512 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13513 /* It will NEVER fit */
13514 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13518 if ((uio == NULL) && user_marks_eor) {
13520 * We do not support eeor mode for
13521 * sending with mbuf chains (like sendfile).
13523 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13528 if (user_marks_eor) {
13529 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13532 * For non-eeor the whole message must fit in
13533 * the socket send buffer.
13535 local_add_more = sndlen;
13538 if (non_blocking) {
13539 goto skip_preblock;
13541 if (((max_len <= local_add_more) &&
13542 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13544 ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13545	/* No room right now! */
13546 SOCKBUF_LOCK(&so->so_snd);
13547 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13548 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13549 ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13550	SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13551 (unsigned int)SCTP_SB_LIMIT_SND(so),
13554 stcb->asoc.stream_queue_cnt,
13555 stcb->asoc.chunks_on_out_queue,
13556 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13557 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13558 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13561 #if !defined(__Panda__) && !defined(__Windows__)
13562 stcb->block_entry = &be;
13564 error = sbwait(&so->so_snd);
13565 stcb->block_entry = NULL;
13566 if (error || so->so_error || be.error) {
13569 error = so->so_error;
13574 SOCKBUF_UNLOCK(&so->so_snd);
13577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13578 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13579 asoc, stcb->asoc.total_output_queue_size);
13581 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13584 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13586 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13587 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13591 SOCKBUF_UNLOCK(&so->so_snd);
13595 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13598 #if defined(__APPLE__)
13599 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
13601 /* sndlen covers for mbuf case
13602 * uio_resid covers for the non-mbuf case
13603 * NOTE: uio will be null when top/mbuf is passed
13606 if (srcv->sinfo_flags & SCTP_EOF) {
13607 got_all_of_the_send = 1;
13610 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13616 struct sctp_stream_queue_pending *sp;
13617 struct sctp_stream_out *strm;
13620 SCTP_TCB_SEND_LOCK(stcb);
13621 if ((asoc->stream_locked) &&
13622 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13623 SCTP_TCB_SEND_UNLOCK(stcb);
13624 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13628 SCTP_TCB_SEND_UNLOCK(stcb);
13630 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13631 if (strm->last_msg_incomplete == 0) {
13633 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13634 if ((sp == NULL) || (error)) {
13637 SCTP_TCB_SEND_LOCK(stcb);
13638 if (sp->msg_is_complete) {
13639 strm->last_msg_incomplete = 0;
13640 asoc->stream_locked = 0;
13642 /* Just got locked to this guy in
13643 * case of an interrupt.
13645 strm->last_msg_incomplete = 1;
13646 asoc->stream_locked = 1;
13647 asoc->stream_locked_on = srcv->sinfo_stream;
13648 sp->sender_all_done = 0;
13650 sctp_snd_sb_alloc(stcb, sp->length);
13651 atomic_add_int(&asoc->stream_queue_cnt, 1);
13652 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13653 SCTP_STAT_INCR(sctps_sends_with_unord);
13655 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13656 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13657 SCTP_TCB_SEND_UNLOCK(stcb);
13659 SCTP_TCB_SEND_LOCK(stcb);
13660 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13661 SCTP_TCB_SEND_UNLOCK(stcb);
13663 /* ???? Huh ??? last msg is gone */
13665 panic("Warning: Last msg marked incomplete, yet nothing left?");
13667 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13668 strm->last_msg_incomplete = 0;
13674 #if defined(__APPLE__)
13675 #if defined(APPLE_LEOPARD)
13676 while (uio->uio_resid > 0) {
13678 while (uio_resid(uio) > 0) {
13681 while (uio->uio_resid > 0) {
13683 /* How much room do we have? */
13684 struct mbuf *new_tail, *mm;
13686 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13687 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13691 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13692 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13693 #if defined(__APPLE__)
13694 #if defined(APPLE_LEOPARD)
13695 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13697 (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) {
13700 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13704 if (hold_tcblock) {
13705 SCTP_TCB_UNLOCK(stcb);
13708 #if defined(__APPLE__)
13709 SCTP_SOCKET_UNLOCK(so, 0);
13711 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
13712 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13714 mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail);
13716 #if defined(__APPLE__)
13717 SCTP_SOCKET_LOCK(so, 0);
13719 if ((mm == NULL) || error) {
13725 /* Update the mbuf and count */
13726 SCTP_TCB_SEND_LOCK(stcb);
13727 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13728 /* we need to get out.
13729 * Peer probably aborted.
/* asoc.state carries SCTP_STATE_* flags (SCTP_PCB_FLAGS_WAS_ABORTED is an inp feature flag, so testing it here was a bug) */
13732	if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) {
13733 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13734 error = ECONNRESET;
13736 SCTP_TCB_SEND_UNLOCK(stcb);
13739 if (sp->tail_mbuf) {
13740 /* tack it to the end */
13741 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13742 sp->tail_mbuf = new_tail;
13744 /* A stolen mbuf */
13746 sp->tail_mbuf = new_tail;
13748 sctp_snd_sb_alloc(stcb, sndout);
13749	atomic_add_int(&sp->length, sndout);
13752 /* Did we reach EOR? */
13753 #if defined(__APPLE__)
13754 #if defined(APPLE_LEOPARD)
13755 if ((uio->uio_resid == 0) &&
13757 if ((uio_resid(uio) == 0) &&
13760 if ((uio->uio_resid == 0) &&
13762 ((user_marks_eor == 0) ||
13763 (srcv->sinfo_flags & SCTP_EOF) ||
13764 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13765 sp->msg_is_complete = 1;
13767 sp->msg_is_complete = 0;
13769 SCTP_TCB_SEND_UNLOCK(stcb);
13771 #if defined(__APPLE__)
13772 #if defined(APPLE_LEOPARD)
13773 if (uio->uio_resid == 0) {
13775 if (uio_resid(uio) == 0) {
13778 if (uio->uio_resid == 0) {
13784 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
13785	/* This is ugly, but we must ensure locking order */
13786 if (hold_tcblock == 0) {
13787 SCTP_TCB_LOCK(stcb);
13790 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13791 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13792 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13793 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13799 SCTP_TCB_UNLOCK(stcb);
13802 /* wait for space now */
13803 if (non_blocking) {
13804 /* Non-blocking io in place out */
13807 /* What about the INIT, send it maybe */
13808 if (queue_only_for_init) {
13809 if (hold_tcblock == 0) {
13810 SCTP_TCB_LOCK(stcb);
13813 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13814 /* a collision took us forward? */
13817 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13818 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13822 if ((net->flight_size > net->cwnd) &&
13823 (asoc->sctp_cmt_on_off == 0)) {
13824 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13826 } else if (asoc->ifp_had_enobuf) {
13827 SCTP_STAT_INCR(sctps_ifnomemqueued);
13828 if (net->flight_size > (2 * net->mtu)) {
13831 asoc->ifp_had_enobuf = 0;
13833 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13834 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
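/*
 * un_sent estimates the bytes not yet in flight: queued payload
 * (total_output_queue_size - total_flight) plus one DATA chunk header
 * per queued stream message.  The Nagle test below only lets a send
 * through once this amounts to a "full" segment.
 */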
13835 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13836 (stcb->asoc.total_flight > 0) &&
13837 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13838 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13841 * Ok, Nagle is set on and we have data outstanding.
13842 * Don't send anything and let SACKs drive out the
13843	 * data unless we have a "full" segment to send.
13845 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13846 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13848 SCTP_STAT_INCR(sctps_naglequeued);
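/*
 * Illustrative numbers (not taken from this code): with
 * smallest_mtu = 1500 and data in flight, a send that leaves
 * un_sent < 1500 - SCTP_MIN_OVERHEAD stays queued here and goes out
 * only when a returning SACK triggers output or more data is queued.
 */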
13851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13852 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13853 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13855 SCTP_STAT_INCR(sctps_naglesent);
13858 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13860 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13861 nagle_applies, un_sent);
13862 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13863 stcb->asoc.total_flight,
13864 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13866 if (queue_only_for_init)
13867 queue_only_for_init = 0;
13868 if ((queue_only == 0) && (nagle_applies == 0)) {
13870 * need to start chunk output
13871	 * before blocking. Note that if
13872 * a lock is already applied, then
13873 * the input via the net is happening
13874 * and I don't need to start output :-D
13876 if (hold_tcblock == 0) {
13877 if (SCTP_TCB_TRYLOCK(stcb)) {
13879 sctp_chunk_output(inp,
13881 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13884 sctp_chunk_output(inp,
13886 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13888 if (hold_tcblock == 1) {
13889 SCTP_TCB_UNLOCK(stcb);
13893 SOCKBUF_LOCK(&so->so_snd);
13895 * This is a bit strange, but I think it will
13896 * work. The total_output_queue_size is locked and
13897 * protected by the TCB_LOCK, which we just released.
13898 * There is a race that can occur between releasing it
13899 * above, and me getting the socket lock, where sacks
13900 * come in but we have not put the SB_WAIT on the
13901 * so_snd buffer to get the wakeup. After the LOCK
13902 * is applied the sack_processing will also need to
13903 * LOCK the so->so_snd to do the actual sowwakeup(). So
13904 * once we have the socket buffer lock if we recheck the
13905 * size we KNOW we will get to sleep safely with the
13906 * wakeup flag in place.
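/*
 * The ordering that makes this safe, sketched:
 *   1. TCB_LOCK is dropped above, so the queue size may change.
 *   2. SOCKBUF_LOCK(&so->so_snd) is taken.
 *   3. The size is rechecked under that lock; SACK processing must
 *      take the same lock before calling sowwakeup(), so once
 *      sbwait() has armed SB_WAIT no wakeup can be lost.
 */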
13908 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13909 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13910 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13911 #if defined(__APPLE__)
13912 #if defined(APPLE_LEOPARD)
13913 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13914 asoc, uio->uio_resid);
13916 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13917 asoc, uio_resid(uio));
13920 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13921 asoc, uio->uio_resid);
13925 #if !defined(__Panda__) && !defined(__Windows__)
13926 stcb->block_entry = &be;
13928 #if defined(__APPLE__)
13929 sbunlock(&so->so_snd, 1);
13931 error = sbwait(&so->so_snd);
13932 stcb->block_entry = NULL;
13934 if (error || so->so_error || be.error) {
13937 error = so->so_error;
13942 SOCKBUF_UNLOCK(&so->so_snd);
13946 #if defined(__APPLE__)
13947 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
13949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13950 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13951 asoc, stcb->asoc.total_output_queue_size);
13954 SOCKBUF_UNLOCK(&so->so_snd);
13955 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13959 SCTP_TCB_SEND_LOCK(stcb);
13961 if (sp->msg_is_complete == 0) {
13962 strm->last_msg_incomplete = 1;
13963 asoc->stream_locked = 1;
13964 asoc->stream_locked_on = srcv->sinfo_stream;
13966 sp->sender_all_done = 1;
13967 strm->last_msg_incomplete = 0;
13968 asoc->stream_locked = 0;
13971 SCTP_PRINTF("Huh no sp TSNH?\n");
13972 strm->last_msg_incomplete = 0;
13973 asoc->stream_locked = 0;
13975 SCTP_TCB_SEND_UNLOCK(stcb);
13976 #if defined(__APPLE__)
13977 #if defined(APPLE_LEOPARD)
13978 if (uio->uio_resid == 0) {
13980 if (uio_resid(uio) == 0) {
13983 if (uio->uio_resid == 0) {
13985 got_all_of_the_send = 1;
13988 /* We send in a 0, since we do NOT have any locks */
13989 error = sctp_msg_append(stcb, net, top, srcv, 0);
13991 if (srcv->sinfo_flags & SCTP_EOF) {
13993 * This should only happen for Panda for the mbuf
13994 * send case, which does NOT yet support EEOR mode.
13995	 * Thus, we can just set this flag to do the proper EOF handling.
13998 got_all_of_the_send = 1;
14006 if ((srcv->sinfo_flags & SCTP_EOF) &&
14007 (got_all_of_the_send == 1)) {
14009 SCTP_STAT_INCR(sctps_sends_with_eof);
14011 if (hold_tcblock == 0) {
14012 SCTP_TCB_LOCK(stcb);
14015 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
14016 if (TAILQ_EMPTY(&asoc->send_queue) &&
14017 TAILQ_EMPTY(&asoc->sent_queue) &&
14019 if (asoc->locked_on_sending) {
14022 /* there is nothing queued to send, so I'm done... */
14023 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
14024 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14025 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14026 struct sctp_nets *netp;
14028 /* only send SHUTDOWN the first time through */
14029 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
14030 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14032 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
14033 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
14034 sctp_stop_timers_for_shutdown(stcb);
14035 if (stcb->asoc.alternate) {
14036 netp = stcb->asoc.alternate;
14038 netp = stcb->asoc.primary_destination;
14040 sctp_send_shutdown(stcb, netp);
14041 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14043 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14044 asoc->primary_destination);
14048	 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
14052	 * XXX sockets draft says that SCTP_EOF should be
14053	 * sent with no data. Currently, we will allow user
14054	 * data to be sent first and move to SHUTDOWN-PENDING.
14057 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
14058 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14059 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14060 if (hold_tcblock == 0) {
14061 SCTP_TCB_LOCK(stcb);
14064 if (asoc->locked_on_sending) {
14065 /* Locked to send out the data */
14066 struct sctp_stream_queue_pending *sp;
14067 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
14069 if ((sp->length == 0) && (sp->msg_is_complete == 0))
14070 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
14073 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
14074 if (TAILQ_EMPTY(&asoc->send_queue) &&
14075 TAILQ_EMPTY(&asoc->sent_queue) &&
14076 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14078 if (free_cnt_applied) {
14079 atomic_add_int(&stcb->asoc.refcnt, -1);
14080 free_cnt_applied = 0;
14082 sctp_abort_an_association(stcb->sctp_ep, stcb,
14083 NULL, SCTP_SO_LOCKED);
14084 /* now relock the stcb so everything is sane */
14089 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14090 asoc->primary_destination);
14091 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14096 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14097 some_on_control = 1;
14099 if (queue_only_for_init) {
14100 if (hold_tcblock == 0) {
14101 SCTP_TCB_LOCK(stcb);
14104 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
14105 /* a collision took us forward? */
14108 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14109 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
14113 if ((net->flight_size > net->cwnd) &&
14114 (stcb->asoc.sctp_cmt_on_off == 0)) {
14115 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14117 } else if (asoc->ifp_had_enobuf) {
14118 SCTP_STAT_INCR(sctps_ifnomemqueued);
14119 if (net->flight_size > (2 * net->mtu)) {
14122 asoc->ifp_had_enobuf = 0;
14124 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
14125 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
14126 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14127 (stcb->asoc.total_flight > 0) &&
14128 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14129 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14131 * Ok, Nagle is set on and we have data outstanding.
14132 * Don't send anything and let SACKs drive out the
14133	 * data unless we have a "full" segment to send.
14135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14136 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14138 SCTP_STAT_INCR(sctps_naglequeued);
14141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14142 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14143 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14145 SCTP_STAT_INCR(sctps_naglesent);
14148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14149 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14150 nagle_applies, un_sent);
14151 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14152 stcb->asoc.total_flight,
14153 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14155 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14156 /* we can attempt to send too. */
14157 if (hold_tcblock == 0) {
14158 /* If there is activity recv'ing sacks no need to send */
14159 if (SCTP_TCB_TRYLOCK(stcb)) {
14160 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14164 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14166 } else if ((queue_only == 0) &&
14167 (stcb->asoc.peers_rwnd == 0) &&
14168 (stcb->asoc.total_flight == 0)) {
14169 /* We get to have a probe outstanding */
14170 if (hold_tcblock == 0) {
14172 SCTP_TCB_LOCK(stcb);
14174 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14175 } else if (some_on_control) {
14176 int num_out, reason, frag_point;
14178 /* Here we do control only */
14179 if (hold_tcblock == 0) {
14181 SCTP_TCB_LOCK(stcb);
14183 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14184 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14185 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14187 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14188 queue_only, stcb->asoc.peers_rwnd, un_sent,
14189 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14190 stcb->asoc.total_output_queue_size, error);
14193 #if defined(__APPLE__)
14194 sbunlock(&so->so_snd, 1);
14198 if (local_soresv && stcb) {
14199 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14201 if (create_lock_applied) {
14202 SCTP_ASOC_CREATE_UNLOCK(inp);
14204 if ((stcb) && hold_tcblock) {
14205 SCTP_TCB_UNLOCK(stcb);
14207 if (stcb && free_cnt_applied) {
14208 atomic_add_int(&stcb->asoc.refcnt, -1);
14211 #if !defined(__APPLE__)
14213 if (mtx_owned(&stcb->tcb_mtx)) {
14214 panic("Leaving with tcb mtx owned?");
14216 if (mtx_owned(&stcb->tcb_send_mtx)) {
14217 panic("Leaving with tcb send mtx owned?");
14224 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
14225	 * to the particle when a pak is passed in, so that the caller
14226 * can try again with this pak
14228 * NOTE: For other cases, including success case,
14229	 * we simply want to return the header back to the free pool.
14233 if ((error == EAGAIN) || (error == ENOMEM)) {
14234 SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
14237 (void)SCTP_RELEASE_HEADER(i_pak);
14240 /* This is to handle cases when top has
14241	 * been reset to NULL but pak might not be freed.
14245 (void)SCTP_RELEASE_HEADER(i_pak);
14251 sctp_validate_no_locks(inp);
14253	SCTP_PRINTF("Warning - inp is NULL so can't validate locks\n");
14260 sctp_m_freem(control);
14267 * generate an AUTHentication chunk, if required
14270 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
14271 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
14272 struct sctp_tcb *stcb, uint8_t chunk)
14274 struct mbuf *m_auth;
14275 struct sctp_auth_chunk *auth;
14279 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14283 /* sysctl disabled auth? */
14284 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
14287 /* peer doesn't do auth... */
14288 if (!stcb->asoc.peer_supports_auth) {
14291 /* does the requested chunk require auth? */
14292 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14295 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14296 if (m_auth == NULL) {
14300 /* reserve some space if this will be the first mbuf */
14302 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14303 /* fill in the AUTH chunk details */
14304 auth = mtod(m_auth, struct sctp_auth_chunk *);
14305 bzero(auth, sizeof(*auth));
14306 auth->ch.chunk_type = SCTP_AUTHENTICATION;
14307 auth->ch.chunk_flags = 0;
14308 chunk_len = sizeof(*auth) +
14309 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14310 auth->ch.chunk_length = htons(chunk_len);
14311 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14312 /* key id and hmac digest will be computed and filled in upon send */
14314 /* save the offset where the auth was inserted into the chain */
14316 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14317 *offset += SCTP_BUF_LEN(cn);
14320 /* update length and return pointer to the auth chunk */
14321 SCTP_BUF_LEN(m_auth) = chunk_len;
14322 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14323 if (auth_ret != NULL)
14329 #if defined(__FreeBSD__) || defined(__APPLE__)
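/*
 * sctp_v6src_match_nexthop(): report whether the route's gateway is
 * one of the routers that advertised the prefix covering src6.  Find
 * the ND prefix entry matching the source address, then walk its
 * advertising routers looking for ro->ro_rt->rt_gateway.
 */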
14332 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14334 struct nd_prefix *pfx = NULL;
14335 struct nd_pfxrouter *pfxrtr = NULL;
14336 struct sockaddr_in6 gw6;
14338 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
14341 /* get prefix entry of address */
14342 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
14343 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
14345 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
14346 &src6->sin6_addr, &pfx->ndpr_mask))
14349 /* no prefix entry in the prefix list */
14351 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
14352 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14356 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
14357 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14359 /* search installed gateway from prefix entry */
14360 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
14361 memset(&gw6, 0, sizeof(struct sockaddr_in6));
14362 gw6.sin6_family = AF_INET6;
14363 #ifdef HAVE_SIN6_LEN
14364 gw6.sin6_len = sizeof(struct sockaddr_in6);
14366 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
14367 sizeof(struct in6_addr));
14368 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
14369 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
14370 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
14371 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14372 if (sctp_cmpaddr((struct sockaddr *)&gw6,
14373 ro->ro_rt->rt_gateway)) {
14374 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
14378 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
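/*
 * sctp_v4src_match_nexthop() is the IPv4 analogue: mask both the
 * candidate source address and the route's gateway with the
 * interface netmask and require that both land in the same subnet.
 */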
14384 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14387 struct sockaddr_in *sin, *mask;
14388 struct ifaddr *ifa;
14389 struct in_addr srcnetaddr, gwnetaddr;
14391 if (ro == NULL || ro->ro_rt == NULL ||
14392 sifa->address.sa.sa_family != AF_INET) {
14395 ifa = (struct ifaddr *)sifa->ifa;
14396 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14397 sin = (struct sockaddr_in *)&sifa->address.sin;
14398 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14399 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14400 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14401 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14403 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14404 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14405 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14406 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14407 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14408 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
14414 #elif defined(__Userspace__)
14415 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
14417 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14422 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)