2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 362178 2020-06-14 16:05:08Z tuexen $");
40 #include <netinet/sctp_os.h>
41 #if defined(__FreeBSD__) && !defined(__Userspace__)
44 #include <netinet/sctp_var.h>
45 #include <netinet/sctp_sysctl.h>
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_pcb.h>
48 #include <netinet/sctputil.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctputil.h>
52 #include <netinet/sctp_auth.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_indata.h>
56 #include <netinet/sctp_bsd_addr.h>
57 #include <netinet/sctp_input.h>
58 #include <netinet/sctp_crc32.h>
59 #if defined(__FreeBSD__) && !defined(__Userspace__)
60 #include <netinet/sctp_kdtrace.h>
62 #if defined(__linux__)
63 #define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
65 #if defined(INET) || defined(INET6)
67 #include <netinet/udp.h>
70 #if !defined(__Userspace__)
71 #if defined(__APPLE__)
72 #include <netinet/in.h>
74 #if defined(__FreeBSD__) && !defined(__Userspace__)
75 #include <netinet/udp_var.h>
76 #include <machine/in_cksum.h>
79 #if defined(__Userspace__) && defined(INET6)
80 #include <netinet6/sctp6_var.h>
83 #if defined(__APPLE__) && !defined(__Userspace__)
84 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
85 #define SCTP_MAX_LINKHDR 16
89 #define SCTP_MAX_GAPS_INARRAY 4
91 uint8_t right_edge; /* mergable on the right edge */
92 uint8_t left_edge; /* mergable on the left edge */
95 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
98 const struct sack_track sack_array[256] = {
99 {0, 0, 0, 0, /* 0x00 */
106 {1, 0, 1, 0, /* 0x01 */
113 {0, 0, 1, 0, /* 0x02 */
120 {1, 0, 1, 0, /* 0x03 */
127 {0, 0, 1, 0, /* 0x04 */
134 {1, 0, 2, 0, /* 0x05 */
141 {0, 0, 1, 0, /* 0x06 */
148 {1, 0, 1, 0, /* 0x07 */
155 {0, 0, 1, 0, /* 0x08 */
162 {1, 0, 2, 0, /* 0x09 */
169 {0, 0, 2, 0, /* 0x0a */
176 {1, 0, 2, 0, /* 0x0b */
183 {0, 0, 1, 0, /* 0x0c */
190 {1, 0, 2, 0, /* 0x0d */
197 {0, 0, 1, 0, /* 0x0e */
204 {1, 0, 1, 0, /* 0x0f */
211 {0, 0, 1, 0, /* 0x10 */
218 {1, 0, 2, 0, /* 0x11 */
225 {0, 0, 2, 0, /* 0x12 */
232 {1, 0, 2, 0, /* 0x13 */
239 {0, 0, 2, 0, /* 0x14 */
246 {1, 0, 3, 0, /* 0x15 */
253 {0, 0, 2, 0, /* 0x16 */
260 {1, 0, 2, 0, /* 0x17 */
267 {0, 0, 1, 0, /* 0x18 */
274 {1, 0, 2, 0, /* 0x19 */
281 {0, 0, 2, 0, /* 0x1a */
288 {1, 0, 2, 0, /* 0x1b */
295 {0, 0, 1, 0, /* 0x1c */
302 {1, 0, 2, 0, /* 0x1d */
309 {0, 0, 1, 0, /* 0x1e */
316 {1, 0, 1, 0, /* 0x1f */
323 {0, 0, 1, 0, /* 0x20 */
330 {1, 0, 2, 0, /* 0x21 */
337 {0, 0, 2, 0, /* 0x22 */
344 {1, 0, 2, 0, /* 0x23 */
351 {0, 0, 2, 0, /* 0x24 */
358 {1, 0, 3, 0, /* 0x25 */
365 {0, 0, 2, 0, /* 0x26 */
372 {1, 0, 2, 0, /* 0x27 */
379 {0, 0, 2, 0, /* 0x28 */
386 {1, 0, 3, 0, /* 0x29 */
393 {0, 0, 3, 0, /* 0x2a */
400 {1, 0, 3, 0, /* 0x2b */
407 {0, 0, 2, 0, /* 0x2c */
414 {1, 0, 3, 0, /* 0x2d */
421 {0, 0, 2, 0, /* 0x2e */
428 {1, 0, 2, 0, /* 0x2f */
435 {0, 0, 1, 0, /* 0x30 */
442 {1, 0, 2, 0, /* 0x31 */
449 {0, 0, 2, 0, /* 0x32 */
456 {1, 0, 2, 0, /* 0x33 */
463 {0, 0, 2, 0, /* 0x34 */
470 {1, 0, 3, 0, /* 0x35 */
477 {0, 0, 2, 0, /* 0x36 */
484 {1, 0, 2, 0, /* 0x37 */
491 {0, 0, 1, 0, /* 0x38 */
498 {1, 0, 2, 0, /* 0x39 */
505 {0, 0, 2, 0, /* 0x3a */
512 {1, 0, 2, 0, /* 0x3b */
519 {0, 0, 1, 0, /* 0x3c */
526 {1, 0, 2, 0, /* 0x3d */
533 {0, 0, 1, 0, /* 0x3e */
540 {1, 0, 1, 0, /* 0x3f */
547 {0, 0, 1, 0, /* 0x40 */
554 {1, 0, 2, 0, /* 0x41 */
561 {0, 0, 2, 0, /* 0x42 */
568 {1, 0, 2, 0, /* 0x43 */
575 {0, 0, 2, 0, /* 0x44 */
582 {1, 0, 3, 0, /* 0x45 */
589 {0, 0, 2, 0, /* 0x46 */
596 {1, 0, 2, 0, /* 0x47 */
603 {0, 0, 2, 0, /* 0x48 */
610 {1, 0, 3, 0, /* 0x49 */
617 {0, 0, 3, 0, /* 0x4a */
624 {1, 0, 3, 0, /* 0x4b */
631 {0, 0, 2, 0, /* 0x4c */
638 {1, 0, 3, 0, /* 0x4d */
645 {0, 0, 2, 0, /* 0x4e */
652 {1, 0, 2, 0, /* 0x4f */
659 {0, 0, 2, 0, /* 0x50 */
666 {1, 0, 3, 0, /* 0x51 */
673 {0, 0, 3, 0, /* 0x52 */
680 {1, 0, 3, 0, /* 0x53 */
687 {0, 0, 3, 0, /* 0x54 */
694 {1, 0, 4, 0, /* 0x55 */
701 {0, 0, 3, 0, /* 0x56 */
708 {1, 0, 3, 0, /* 0x57 */
715 {0, 0, 2, 0, /* 0x58 */
722 {1, 0, 3, 0, /* 0x59 */
729 {0, 0, 3, 0, /* 0x5a */
736 {1, 0, 3, 0, /* 0x5b */
743 {0, 0, 2, 0, /* 0x5c */
750 {1, 0, 3, 0, /* 0x5d */
757 {0, 0, 2, 0, /* 0x5e */
764 {1, 0, 2, 0, /* 0x5f */
771 {0, 0, 1, 0, /* 0x60 */
778 {1, 0, 2, 0, /* 0x61 */
785 {0, 0, 2, 0, /* 0x62 */
792 {1, 0, 2, 0, /* 0x63 */
799 {0, 0, 2, 0, /* 0x64 */
806 {1, 0, 3, 0, /* 0x65 */
813 {0, 0, 2, 0, /* 0x66 */
820 {1, 0, 2, 0, /* 0x67 */
827 {0, 0, 2, 0, /* 0x68 */
834 {1, 0, 3, 0, /* 0x69 */
841 {0, 0, 3, 0, /* 0x6a */
848 {1, 0, 3, 0, /* 0x6b */
855 {0, 0, 2, 0, /* 0x6c */
862 {1, 0, 3, 0, /* 0x6d */
869 {0, 0, 2, 0, /* 0x6e */
876 {1, 0, 2, 0, /* 0x6f */
883 {0, 0, 1, 0, /* 0x70 */
890 {1, 0, 2, 0, /* 0x71 */
897 {0, 0, 2, 0, /* 0x72 */
904 {1, 0, 2, 0, /* 0x73 */
911 {0, 0, 2, 0, /* 0x74 */
918 {1, 0, 3, 0, /* 0x75 */
925 {0, 0, 2, 0, /* 0x76 */
932 {1, 0, 2, 0, /* 0x77 */
939 {0, 0, 1, 0, /* 0x78 */
946 {1, 0, 2, 0, /* 0x79 */
953 {0, 0, 2, 0, /* 0x7a */
960 {1, 0, 2, 0, /* 0x7b */
967 {0, 0, 1, 0, /* 0x7c */
974 {1, 0, 2, 0, /* 0x7d */
981 {0, 0, 1, 0, /* 0x7e */
988 {1, 0, 1, 0, /* 0x7f */
995 {0, 1, 1, 0, /* 0x80 */
1002 {1, 1, 2, 0, /* 0x81 */
1009 {0, 1, 2, 0, /* 0x82 */
1016 {1, 1, 2, 0, /* 0x83 */
1023 {0, 1, 2, 0, /* 0x84 */
1030 {1, 1, 3, 0, /* 0x85 */
1037 {0, 1, 2, 0, /* 0x86 */
1044 {1, 1, 2, 0, /* 0x87 */
1051 {0, 1, 2, 0, /* 0x88 */
1058 {1, 1, 3, 0, /* 0x89 */
1065 {0, 1, 3, 0, /* 0x8a */
1072 {1, 1, 3, 0, /* 0x8b */
1079 {0, 1, 2, 0, /* 0x8c */
1086 {1, 1, 3, 0, /* 0x8d */
1093 {0, 1, 2, 0, /* 0x8e */
1100 {1, 1, 2, 0, /* 0x8f */
1107 {0, 1, 2, 0, /* 0x90 */
1114 {1, 1, 3, 0, /* 0x91 */
1121 {0, 1, 3, 0, /* 0x92 */
1128 {1, 1, 3, 0, /* 0x93 */
1135 {0, 1, 3, 0, /* 0x94 */
1142 {1, 1, 4, 0, /* 0x95 */
1149 {0, 1, 3, 0, /* 0x96 */
1156 {1, 1, 3, 0, /* 0x97 */
1163 {0, 1, 2, 0, /* 0x98 */
1170 {1, 1, 3, 0, /* 0x99 */
1177 {0, 1, 3, 0, /* 0x9a */
1184 {1, 1, 3, 0, /* 0x9b */
1191 {0, 1, 2, 0, /* 0x9c */
1198 {1, 1, 3, 0, /* 0x9d */
1205 {0, 1, 2, 0, /* 0x9e */
1212 {1, 1, 2, 0, /* 0x9f */
1219 {0, 1, 2, 0, /* 0xa0 */
1226 {1, 1, 3, 0, /* 0xa1 */
1233 {0, 1, 3, 0, /* 0xa2 */
1240 {1, 1, 3, 0, /* 0xa3 */
1247 {0, 1, 3, 0, /* 0xa4 */
1254 {1, 1, 4, 0, /* 0xa5 */
1261 {0, 1, 3, 0, /* 0xa6 */
1268 {1, 1, 3, 0, /* 0xa7 */
1275 {0, 1, 3, 0, /* 0xa8 */
1282 {1, 1, 4, 0, /* 0xa9 */
1289 {0, 1, 4, 0, /* 0xaa */
1296 {1, 1, 4, 0, /* 0xab */
1303 {0, 1, 3, 0, /* 0xac */
1310 {1, 1, 4, 0, /* 0xad */
1317 {0, 1, 3, 0, /* 0xae */
1324 {1, 1, 3, 0, /* 0xaf */
1331 {0, 1, 2, 0, /* 0xb0 */
1338 {1, 1, 3, 0, /* 0xb1 */
1345 {0, 1, 3, 0, /* 0xb2 */
1352 {1, 1, 3, 0, /* 0xb3 */
1359 {0, 1, 3, 0, /* 0xb4 */
1366 {1, 1, 4, 0, /* 0xb5 */
1373 {0, 1, 3, 0, /* 0xb6 */
1380 {1, 1, 3, 0, /* 0xb7 */
1387 {0, 1, 2, 0, /* 0xb8 */
1394 {1, 1, 3, 0, /* 0xb9 */
1401 {0, 1, 3, 0, /* 0xba */
1408 {1, 1, 3, 0, /* 0xbb */
1415 {0, 1, 2, 0, /* 0xbc */
1422 {1, 1, 3, 0, /* 0xbd */
1429 {0, 1, 2, 0, /* 0xbe */
1436 {1, 1, 2, 0, /* 0xbf */
1443 {0, 1, 1, 0, /* 0xc0 */
1450 {1, 1, 2, 0, /* 0xc1 */
1457 {0, 1, 2, 0, /* 0xc2 */
1464 {1, 1, 2, 0, /* 0xc3 */
1471 {0, 1, 2, 0, /* 0xc4 */
1478 {1, 1, 3, 0, /* 0xc5 */
1485 {0, 1, 2, 0, /* 0xc6 */
1492 {1, 1, 2, 0, /* 0xc7 */
1499 {0, 1, 2, 0, /* 0xc8 */
1506 {1, 1, 3, 0, /* 0xc9 */
1513 {0, 1, 3, 0, /* 0xca */
1520 {1, 1, 3, 0, /* 0xcb */
1527 {0, 1, 2, 0, /* 0xcc */
1534 {1, 1, 3, 0, /* 0xcd */
1541 {0, 1, 2, 0, /* 0xce */
1548 {1, 1, 2, 0, /* 0xcf */
1555 {0, 1, 2, 0, /* 0xd0 */
1562 {1, 1, 3, 0, /* 0xd1 */
1569 {0, 1, 3, 0, /* 0xd2 */
1576 {1, 1, 3, 0, /* 0xd3 */
1583 {0, 1, 3, 0, /* 0xd4 */
1590 {1, 1, 4, 0, /* 0xd5 */
1597 {0, 1, 3, 0, /* 0xd6 */
1604 {1, 1, 3, 0, /* 0xd7 */
1611 {0, 1, 2, 0, /* 0xd8 */
1618 {1, 1, 3, 0, /* 0xd9 */
1625 {0, 1, 3, 0, /* 0xda */
1632 {1, 1, 3, 0, /* 0xdb */
1639 {0, 1, 2, 0, /* 0xdc */
1646 {1, 1, 3, 0, /* 0xdd */
1653 {0, 1, 2, 0, /* 0xde */
1660 {1, 1, 2, 0, /* 0xdf */
1667 {0, 1, 1, 0, /* 0xe0 */
1674 {1, 1, 2, 0, /* 0xe1 */
1681 {0, 1, 2, 0, /* 0xe2 */
1688 {1, 1, 2, 0, /* 0xe3 */
1695 {0, 1, 2, 0, /* 0xe4 */
1702 {1, 1, 3, 0, /* 0xe5 */
1709 {0, 1, 2, 0, /* 0xe6 */
1716 {1, 1, 2, 0, /* 0xe7 */
1723 {0, 1, 2, 0, /* 0xe8 */
1730 {1, 1, 3, 0, /* 0xe9 */
1737 {0, 1, 3, 0, /* 0xea */
1744 {1, 1, 3, 0, /* 0xeb */
1751 {0, 1, 2, 0, /* 0xec */
1758 {1, 1, 3, 0, /* 0xed */
1765 {0, 1, 2, 0, /* 0xee */
1772 {1, 1, 2, 0, /* 0xef */
1779 {0, 1, 1, 0, /* 0xf0 */
1786 {1, 1, 2, 0, /* 0xf1 */
1793 {0, 1, 2, 0, /* 0xf2 */
1800 {1, 1, 2, 0, /* 0xf3 */
1807 {0, 1, 2, 0, /* 0xf4 */
1814 {1, 1, 3, 0, /* 0xf5 */
1821 {0, 1, 2, 0, /* 0xf6 */
1828 {1, 1, 2, 0, /* 0xf7 */
1835 {0, 1, 1, 0, /* 0xf8 */
1842 {1, 1, 2, 0, /* 0xf9 */
1849 {0, 1, 2, 0, /* 0xfa */
1856 {1, 1, 2, 0, /* 0xfb */
1863 {0, 1, 1, 0, /* 0xfc */
1870 {1, 1, 2, 0, /* 0xfd */
1877 {0, 1, 1, 0, /* 0xfe */
1884 {1, 1, 1, 0, /* 0xff */
1895 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1896 struct sctp_scoping *scope,
1899 if ((scope->loopback_scope == 0) &&
1900 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1902 * skip loopback if not in scope *
1906 switch (ifa->address.sa.sa_family) {
1909 if (scope->ipv4_addr_legal) {
1910 struct sockaddr_in *sin;
1912 sin = &ifa->address.sin;
1913 if (sin->sin_addr.s_addr == 0) {
1914 /* not in scope , unspecified */
1917 if ((scope->ipv4_local_scope == 0) &&
1918 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1919 /* private address not in scope */
1929 if (scope->ipv6_addr_legal) {
1930 struct sockaddr_in6 *sin6;
1932 /* Must update the flags, bummer, which
1933 * means any IFA locks must now be applied HERE <->
1936 sctp_gather_internal_ifa_flags(ifa);
1938 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1941 /* ok to use deprecated addresses? */
1942 sin6 = &ifa->address.sin6;
1943 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1944 /* skip unspecifed addresses */
1947 if ( /* (local_scope == 0) && */
1948 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1951 if ((scope->site_scope == 0) &&
1952 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1960 #if defined(__Userspace__)
1962 if (!scope->conn_addr_legal) {
1973 static struct mbuf *
1974 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1976 #if defined(INET) || defined(INET6)
1977 struct sctp_paramhdr *paramh;
1982 switch (ifa->address.sa.sa_family) {
1985 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1990 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1996 #if defined(INET) || defined(INET6)
1997 if (M_TRAILINGSPACE(m) >= plen) {
1998 /* easy side we just drop it on the end */
1999 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
2002 /* Need more space */
2004 while (SCTP_BUF_NEXT(mret) != NULL) {
2005 mret = SCTP_BUF_NEXT(mret);
2007 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2008 if (SCTP_BUF_NEXT(mret) == NULL) {
2009 /* We are hosed, can't add more addresses */
2012 mret = SCTP_BUF_NEXT(mret);
2013 paramh = mtod(mret, struct sctp_paramhdr *);
2015 /* now add the parameter */
2016 switch (ifa->address.sa.sa_family) {
2020 struct sctp_ipv4addr_param *ipv4p;
2021 struct sockaddr_in *sin;
2023 sin = &ifa->address.sin;
2024 ipv4p = (struct sctp_ipv4addr_param *)paramh;
2025 paramh->param_type = htons(SCTP_IPV4_ADDRESS);
2026 paramh->param_length = htons(plen);
2027 ipv4p->addr = sin->sin_addr.s_addr;
2028 SCTP_BUF_LEN(mret) += plen;
2035 struct sctp_ipv6addr_param *ipv6p;
2036 struct sockaddr_in6 *sin6;
2038 sin6 = &ifa->address.sin6;
2039 ipv6p = (struct sctp_ipv6addr_param *)paramh;
2040 paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2041 paramh->param_length = htons(plen);
2042 memcpy(ipv6p->addr, &sin6->sin6_addr,
2043 sizeof(ipv6p->addr));
2044 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2045 /* clear embedded scope in the address */
2046 in6_clearscope((struct in6_addr *)ipv6p->addr);
2048 SCTP_BUF_LEN(mret) += plen;
2064 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2065 struct sctp_scoping *scope,
2066 struct mbuf *m_at, int cnt_inits_to,
2067 uint16_t *padding_len, uint16_t *chunk_len)
2069 struct sctp_vrf *vrf = NULL;
2070 int cnt, limit_out = 0, total_count;
2073 vrf_id = inp->def_vrf_id;
2074 SCTP_IPI_ADDR_RLOCK();
2075 vrf = sctp_find_vrf(vrf_id);
2077 SCTP_IPI_ADDR_RUNLOCK();
2080 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2081 struct sctp_ifa *sctp_ifap;
2082 struct sctp_ifn *sctp_ifnp;
2085 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2087 cnt = SCTP_ADDRESS_LIMIT;
2090 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2091 if ((scope->loopback_scope == 0) &&
2092 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2094 * Skip loopback devices if loopback_scope
2099 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2100 #if defined(__FreeBSD__) && !defined(__Userspace__)
2102 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2103 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2104 &sctp_ifap->address.sin.sin_addr) != 0)) {
2109 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2110 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2111 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2116 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2119 #if defined(__Userspace__)
2120 if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2124 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2128 if (cnt > SCTP_ADDRESS_LIMIT) {
2132 if (cnt > SCTP_ADDRESS_LIMIT) {
2139 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2141 if ((scope->loopback_scope == 0) &&
2142 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2144 * Skip loopback devices if
2145 * loopback_scope not set
2149 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2150 #if defined(__FreeBSD__) && !defined(__Userspace__)
2152 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2153 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2154 &sctp_ifap->address.sin.sin_addr) != 0)) {
2159 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2160 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2161 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2166 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2169 #if defined(__Userspace__)
2170 if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2174 if (sctp_is_address_in_scope(sctp_ifap,
2178 if ((chunk_len != NULL) &&
2179 (padding_len != NULL) &&
2180 (*padding_len > 0)) {
2181 memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2182 SCTP_BUF_LEN(m_at) += *padding_len;
2183 *chunk_len += *padding_len;
2186 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2191 /* two from each address */
2194 if (total_count > SCTP_ADDRESS_LIMIT) {
2195 /* No more addresses */
2203 struct sctp_laddr *laddr;
2206 /* First, how many ? */
2207 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2208 if (laddr->ifa == NULL) {
2211 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2212 /* Address being deleted by the system, dont
2216 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2217 /* Address being deleted on this ep
2222 #if defined(__Userspace__)
2223 if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2227 if (sctp_is_address_in_scope(laddr->ifa,
2234 * To get through a NAT we only list addresses if we have
2235 * more than one. That way if you just bind a single address
2236 * we let the source of the init dictate our address.
2240 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2241 if (laddr->ifa == NULL) {
2244 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2247 #if defined(__Userspace__)
2248 if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2252 if (sctp_is_address_in_scope(laddr->ifa,
2256 if ((chunk_len != NULL) &&
2257 (padding_len != NULL) &&
2258 (*padding_len > 0)) {
2259 memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2260 SCTP_BUF_LEN(m_at) += *padding_len;
2261 *chunk_len += *padding_len;
2264 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2266 if (cnt >= SCTP_ADDRESS_LIMIT) {
2272 SCTP_IPI_ADDR_RUNLOCK();
2276 static struct sctp_ifa *
2277 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2278 uint8_t dest_is_loop,
2279 uint8_t dest_is_priv,
2282 uint8_t dest_is_global = 0;
2283 /* dest_is_priv is true if destination is a private address */
2284 /* dest_is_loop is true if destination is a loopback addresses */
2287 * Here we determine if its a preferred address. A preferred address
2288 * means it is the same scope or higher scope then the destination.
2289 * L = loopback, P = private, G = global
2290 * -----------------------------------------
2291 * src | dest | result
2292 * ----------------------------------------
2294 * -----------------------------------------
2295 * P | L | yes-v4 no-v6
2296 * -----------------------------------------
2297 * G | L | yes-v4 no-v6
2298 * -----------------------------------------
2300 * -----------------------------------------
2302 * -----------------------------------------
2304 * -----------------------------------------
2306 * -----------------------------------------
2308 * -----------------------------------------
2310 * -----------------------------------------
2313 if (ifa->address.sa.sa_family != fam) {
2314 /* forget mis-matched family */
2317 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2320 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2321 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2322 /* Ok the address may be ok */
2324 if (fam == AF_INET6) {
2325 /* ok to use deprecated addresses? no lets not! */
2326 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2327 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2330 if (ifa->src_is_priv && !ifa->src_is_loop) {
2332 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2336 if (ifa->src_is_glob) {
2338 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2344 /* Now that we know what is what, implement or table
2345 * this could in theory be done slicker (it used to be), but this
2346 * is straightforward and easier to validate :-)
2348 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2349 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2350 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2351 dest_is_loop, dest_is_priv, dest_is_global);
2353 if ((ifa->src_is_loop) && (dest_is_priv)) {
2354 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2357 if ((ifa->src_is_glob) && (dest_is_priv)) {
2358 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2361 if ((ifa->src_is_loop) && (dest_is_global)) {
2362 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2365 if ((ifa->src_is_priv) && (dest_is_global)) {
2366 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2369 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2370 /* its a preferred address */
2374 static struct sctp_ifa *
2375 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2376 uint8_t dest_is_loop,
2377 uint8_t dest_is_priv,
2380 uint8_t dest_is_global = 0;
2383 * Here we determine if its a acceptable address. A acceptable
2384 * address means it is the same scope or higher scope but we can
2385 * allow for NAT which means its ok to have a global dest and a
2388 * L = loopback, P = private, G = global
2389 * -----------------------------------------
2390 * src | dest | result
2391 * -----------------------------------------
2393 * -----------------------------------------
2394 * P | L | yes-v4 no-v6
2395 * -----------------------------------------
2397 * -----------------------------------------
2399 * -----------------------------------------
2401 * -----------------------------------------
2402 * G | P | yes - May not work
2403 * -----------------------------------------
2405 * -----------------------------------------
2406 * P | G | yes - May not work
2407 * -----------------------------------------
2409 * -----------------------------------------
2412 if (ifa->address.sa.sa_family != fam) {
2413 /* forget non matching family */
2414 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2415 ifa->address.sa.sa_family, fam);
2418 /* Ok the address may be ok */
2419 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2420 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2421 dest_is_loop, dest_is_priv);
2422 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2426 if (fam == AF_INET6) {
2427 /* ok to use deprecated addresses? */
2428 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2431 if (ifa->src_is_priv) {
2432 /* Special case, linklocal to loop */
2439 * Now that we know what is what, implement our table.
2440 * This could in theory be done slicker (it used to be), but this
2441 * is straightforward and easier to validate :-)
2443 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2446 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2449 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2452 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2455 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2456 /* its an acceptable address */
2461 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2463 struct sctp_laddr *laddr;
2466 /* There are no restrictions, no TCB :-) */
2469 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2470 if (laddr->ifa == NULL) {
2471 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2475 if (laddr->ifa == ifa) {
2476 /* Yes it is on the list */
2485 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2487 struct sctp_laddr *laddr;
2491 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2492 if (laddr->ifa == NULL) {
2493 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2497 if ((laddr->ifa == ifa) && laddr->action == 0)
2506 static struct sctp_ifa *
2507 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2510 int non_asoc_addr_ok,
2511 uint8_t dest_is_priv,
2512 uint8_t dest_is_loop,
2515 struct sctp_laddr *laddr, *starting_point;
2518 struct sctp_ifn *sctp_ifn;
2519 struct sctp_ifa *sctp_ifa, *sifa;
2520 struct sctp_vrf *vrf;
2523 vrf = sctp_find_vrf(vrf_id);
2527 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2528 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2529 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2531 * first question, is the ifn we will emit on in our list, if so, we
2532 * want such an address. Note that we first looked for a
2533 * preferred address.
2536 /* is a preferred one on the interface we route out? */
2537 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2538 #if defined(__FreeBSD__) && !defined(__Userspace__)
2540 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2541 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2542 &sctp_ifa->address.sin.sin_addr) != 0)) {
2547 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2548 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2549 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2554 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2555 (non_asoc_addr_ok == 0))
2557 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2562 if (sctp_is_addr_in_ep(inp, sifa)) {
2563 atomic_add_int(&sifa->refcount, 1);
2569 * ok, now we now need to find one on the list of the addresses.
2570 * We can't get one on the emitting interface so let's find first
2571 * a preferred one. If not that an acceptable one otherwise...
2574 starting_point = inp->next_addr_touse;
2576 if (inp->next_addr_touse == NULL) {
2577 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2580 for (laddr = inp->next_addr_touse; laddr;
2581 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2582 if (laddr->ifa == NULL) {
2583 /* address has been removed */
2586 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2587 /* address is being deleted */
2590 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2594 atomic_add_int(&sifa->refcount, 1);
2597 if (resettotop == 0) {
2598 inp->next_addr_touse = NULL;
2602 inp->next_addr_touse = starting_point;
2605 if (inp->next_addr_touse == NULL) {
2606 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2610 /* ok, what about an acceptable address in the inp */
2611 for (laddr = inp->next_addr_touse; laddr;
2612 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2613 if (laddr->ifa == NULL) {
2614 /* address has been removed */
2617 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2618 /* address is being deleted */
2621 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2625 atomic_add_int(&sifa->refcount, 1);
2628 if (resettotop == 0) {
2629 inp->next_addr_touse = NULL;
2630 goto once_again_too;
2634 * no address bound can be a source for the destination we are in
2642 static struct sctp_ifa *
2643 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2644 struct sctp_tcb *stcb,
2647 uint8_t dest_is_priv,
2648 uint8_t dest_is_loop,
2649 int non_asoc_addr_ok,
2652 struct sctp_laddr *laddr, *starting_point;
2654 struct sctp_ifn *sctp_ifn;
2655 struct sctp_ifa *sctp_ifa, *sifa;
2656 uint8_t start_at_beginning = 0;
2657 struct sctp_vrf *vrf;
2661 * first question, is the ifn we will emit on in our list, if so, we
2664 vrf = sctp_find_vrf(vrf_id);
2668 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2669 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2670 sctp_ifn = sctp_find_ifn( ifn, ifn_index);
2673 * first question, is the ifn we will emit on in our list? If so,
2674 * we want that one. First we look for a preferred. Second, we go
2675 * for an acceptable.
2678 /* first try for a preferred address on the ep */
2679 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2680 #if defined(__FreeBSD__) && !defined(__Userspace__)
2682 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2683 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2684 &sctp_ifa->address.sin.sin_addr) != 0)) {
2689 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2690 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2691 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2696 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2698 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2699 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2702 if (((non_asoc_addr_ok == 0) &&
2703 (sctp_is_addr_restricted(stcb, sifa))) ||
2704 (non_asoc_addr_ok &&
2705 (sctp_is_addr_restricted(stcb, sifa)) &&
2706 (!sctp_is_addr_pending(stcb, sifa)))) {
2707 /* on the no-no list */
2710 atomic_add_int(&sifa->refcount, 1);
2714 /* next try for an acceptable address on the ep */
2715 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2716 #if defined(__FreeBSD__) && !defined(__Userspace__)
2718 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2719 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2720 &sctp_ifa->address.sin.sin_addr) != 0)) {
2725 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2726 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2727 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2732 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2734 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2735 sifa= sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv,fam);
2738 if (((non_asoc_addr_ok == 0) &&
2739 (sctp_is_addr_restricted(stcb, sifa))) ||
2740 (non_asoc_addr_ok &&
2741 (sctp_is_addr_restricted(stcb, sifa)) &&
2742 (!sctp_is_addr_pending(stcb, sifa)))) {
2743 /* on the no-no list */
2746 atomic_add_int(&sifa->refcount, 1);
2753 * if we can't find one like that then we must look at all
2754 * addresses bound to pick one at first preferable then
2755 * secondly acceptable.
2757 starting_point = stcb->asoc.last_used_address;
2759 if (stcb->asoc.last_used_address == NULL) {
2760 start_at_beginning = 1;
2761 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2763 /* search beginning with the last used address */
2764 for (laddr = stcb->asoc.last_used_address; laddr;
2765 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2766 if (laddr->ifa == NULL) {
2767 /* address has been removed */
2770 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2771 /* address is being deleted */
2774 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2777 if (((non_asoc_addr_ok == 0) &&
2778 (sctp_is_addr_restricted(stcb, sifa))) ||
2779 (non_asoc_addr_ok &&
2780 (sctp_is_addr_restricted(stcb, sifa)) &&
2781 (!sctp_is_addr_pending(stcb, sifa)))) {
2782 /* on the no-no list */
2785 stcb->asoc.last_used_address = laddr;
2786 atomic_add_int(&sifa->refcount, 1);
2789 if (start_at_beginning == 0) {
2790 stcb->asoc.last_used_address = NULL;
2791 goto sctp_from_the_top;
2793 /* now try for any higher scope than the destination */
2794 stcb->asoc.last_used_address = starting_point;
2795 start_at_beginning = 0;
2797 if (stcb->asoc.last_used_address == NULL) {
2798 start_at_beginning = 1;
2799 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2801 /* search beginning with the last used address */
2802 for (laddr = stcb->asoc.last_used_address; laddr;
2803 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2804 if (laddr->ifa == NULL) {
2805 /* address has been removed */
2808 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2809 /* address is being deleted */
2812 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2816 if (((non_asoc_addr_ok == 0) &&
2817 (sctp_is_addr_restricted(stcb, sifa))) ||
2818 (non_asoc_addr_ok &&
2819 (sctp_is_addr_restricted(stcb, sifa)) &&
2820 (!sctp_is_addr_pending(stcb, sifa)))) {
2821 /* on the no-no list */
2824 stcb->asoc.last_used_address = laddr;
2825 atomic_add_int(&sifa->refcount, 1);
2828 if (start_at_beginning == 0) {
2829 stcb->asoc.last_used_address = NULL;
2830 goto sctp_from_the_top2;
/*
 * Select the (addr_wanted + 1)-th (0-based) "preferred" source address on
 * interface 'ifn' for a bound-all endpoint.  An address counts only if it
 * survives every filter below: jail check (FreeBSD kernel only),
 * SCTP_ADDR_DEFER_USE (unless non_asoc_addr_ok), preferred-ness for the
 * destination type, IPv6 loopback/link-local scope matching, mobility
 * next-hop matching, association scope, and the restricted/pending lists.
 * NOTE(review): this extract elides lines (continue/return statements and
 * closing braces are missing); code is kept byte-identical to the source.
 */
2835 static struct sctp_ifa *
2836 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2837 #if defined(__FreeBSD__) && !defined(__Userspace__)
2838 struct sctp_inpcb *inp,
2840 struct sctp_inpcb *inp SCTP_UNUSED,
2842 struct sctp_tcb *stcb,
2843 int non_asoc_addr_ok,
2844 uint8_t dest_is_loop,
2845 uint8_t dest_is_priv,
2851 struct sctp_ifa *ifa, *sifa;
2852 int num_eligible_addr = 0;
2854 #ifdef SCTP_EMBEDDED_V6_SCOPE
2855 struct sockaddr_in6 sin6, lsa6;
/* Recover the embedded IPv6 scope id of the destination up front so
 * link-local candidates can be compared against it below. */
2857 if (fam == AF_INET6) {
2858 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2860 (void)sa6_recoverscope(&sin6);
2862 (void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2863 #endif /* SCTP_KAME */
2865 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2867 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2868 #if defined(__FreeBSD__) && !defined(__Userspace__)
/* Skip addresses the credential's jail is not allowed to use. */
2870 if ((ifa->address.sa.sa_family == AF_INET) &&
2871 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2872 &ifa->address.sin.sin_addr) != 0)) {
2877 if ((ifa->address.sa.sa_family == AF_INET6) &&
2878 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2879 &ifa->address.sin6.sin6_addr) != 0)) {
2884 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2885 (non_asoc_addr_ok == 0))
2887 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2892 if (fam == AF_INET6 &&
2894 sifa->src_is_loop && sifa->src_is_priv) {
2895 /* don't allow fe80::1 to be a src on loop ::1, we don't list it
2896 * to the peer so we will get an abort.
2900 #ifdef SCTP_EMBEDDED_V6_SCOPE
2901 if (fam == AF_INET6 &&
2902 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2903 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2904 /* link-local <-> link-local must belong to the same scope. */
2905 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2907 (void)sa6_recoverscope(&lsa6);
2909 (void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2910 #endif /* SCTP_KAME */
2911 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2915 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2918 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2919 /* Check if the IPv6 address matches to next-hop.
2920 In the mobile case, old IPv6 address may be not deleted
2921 from the interface. Then, the interface has previous and
2922 new addresses. We should use one corresponding to the
2923 next-hop. (by micchie)
2926 if (stcb && fam == AF_INET6 &&
2927 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2928 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2935 /* Avoid topologically incorrect IPv4 address */
2936 if (stcb && fam == AF_INET &&
2937 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2938 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2945 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
/* Restricted addresses are unusable unless non_asoc_addr_ok is set
 * AND the address is merely pending (ASCONF add in flight). */
2948 if (((non_asoc_addr_ok == 0) &&
2949 (sctp_is_addr_restricted(stcb, sifa))) ||
2950 (non_asoc_addr_ok &&
2951 (sctp_is_addr_restricted(stcb, sifa)) &&
2952 (!sctp_is_addr_pending(stcb, sifa)))) {
2954 * It is restricted for some reason..
2955 * probably not yet added.
/* Eligible: return it once we have skipped addr_wanted earlier ones.
 * NOTE(review): the return statement is elided in this extract. */
2960 if (num_eligible_addr >= addr_wanted) {
2963 num_eligible_addr++;
/*
 * Count how many addresses on 'ifn' are usable "preferred" source
 * addresses for this association (bound-all case).  Applies the same
 * filters as sctp_select_nth_preferred_addr_from_ifn_boundall — jail
 * check, SCTP_ADDR_DEFER_USE, preferred-ness, association scope, and
 * restricted/pending lists — but without the IPv6 scope / mobility
 * checks; keep the two filter chains in sync when editing.
 * Returns the count (>= 0).
 * NOTE(review): this extract elides lines (continue statements and
 * closing braces); code is kept byte-identical to the source.
 */
2970 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2971 #if defined(__FreeBSD__) && !defined(__Userspace__)
2972 struct sctp_inpcb *inp,
2974 struct sctp_inpcb *inp SCTP_UNUSED,
2976 struct sctp_tcb *stcb,
2977 int non_asoc_addr_ok,
2978 uint8_t dest_is_loop,
2979 uint8_t dest_is_priv,
2982 struct sctp_ifa *ifa, *sifa;
2983 int num_eligible_addr = 0;
2985 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2986 #if defined(__FreeBSD__) && !defined(__Userspace__)
/* Skip addresses the credential's jail is not allowed to use. */
2988 if ((ifa->address.sa.sa_family == AF_INET) &&
2989 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2990 &ifa->address.sin.sin_addr) != 0)) {
2995 if ((ifa->address.sa.sa_family == AF_INET6) &&
2997 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2998 &ifa->address.sin6.sin6_addr) != 0)) {
3003 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3004 (non_asoc_addr_ok == 0)) {
3007 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3013 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3016 if (((non_asoc_addr_ok == 0) &&
3017 (sctp_is_addr_restricted(stcb, sifa))) ||
3018 (non_asoc_addr_ok &&
3019 (sctp_is_addr_restricted(stcb, sifa)) &&
3020 (!sctp_is_addr_pending(stcb, sifa)))) {
3022 * It is restricted for some reason..
3023 * probably not yet added.
3028 num_eligible_addr++;
3030 return (num_eligible_addr);
/*
 * Source address selection for a bound-all endpoint.  Tries, in order:
 *   Plan A — a preferred address on the route's emit interface, rotating
 *            via net->indx_of_eligible_next_to_use;
 *   Plan B — a preferred address on any other interface in the VRF;
 *   Plan C — an *acceptable* (weaker than preferred) address on the emit
 *            interface;
 *   Plan D — an acceptable address on any interface;
 *   Plan E (retry) — temporarily enable ipv4_local_scope and redo C/D,
 *            then a final sweep that also restricts selected private
 *            addresses so the peer is told about them.
 * Each successful path bumps the chosen ifa's refcount before returning;
 * the caller owns that reference.  Caller holds SCTP_IPI_ADDR_RLOCK.
 * NOTE(review): this extract elides lines (gotos, continues, braces,
 * returns); code is kept byte-identical to the source.
 */
3033 static struct sctp_ifa *
3034 sctp_choose_boundall(struct sctp_inpcb *inp,
3035 struct sctp_tcb *stcb,
3036 struct sctp_nets *net,
3039 uint8_t dest_is_priv,
3040 uint8_t dest_is_loop,
3041 int non_asoc_addr_ok,
3044 int cur_addr_num = 0, num_preferred = 0;
3046 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3047 struct sctp_ifa *sctp_ifa, *sifa;
3049 struct sctp_vrf *vrf;
3055 * For boundall we can use any address in the association.
3056 * If non_asoc_addr_ok is set we can use any address (at least in
3057 * theory). So we look for preferred addresses first. If we find one,
3058 * we use it. Otherwise we next try to get an address on the
3059 * interface, which we should be able to do (unless non_asoc_addr_ok
3060 * is false and we are routed out that way). In these cases where we
3061 * can't use the address of the interface we go through all the
3062 * ifn's looking for an address we can use and fill that in. Punting
3063 * means we send back address 0, which will probably cause problems
3064 * actually since then IP will fill in the address of the route ifn,
3065 * which means we probably already rejected it.. i.e. here comes an
3068 vrf = sctp_find_vrf(vrf_id);
/* Plan A: rotate through preferred addresses on the emit interface. */
3072 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3073 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3074 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3075 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3076 if (sctp_ifn == NULL) {
3077 /* ?? We don't have this guy ?? */
3078 SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3079 goto bound_all_plan_b;
3081 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3082 ifn_index, sctp_ifn->ifn_name);
3085 cur_addr_num = net->indx_of_eligible_next_to_use;
3087 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3092 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3093 num_preferred, sctp_ifn->ifn_name);
3094 if (num_preferred == 0) {
3096 * no eligible addresses, we must use some other interface
3097 * address if we can find one.
3099 goto bound_all_plan_b;
3102 * Ok we have num_eligible_addr set with how many we can use, this
3103 * may vary from call to call due to addresses being deprecated
/* Wrap the rotation index when addresses disappeared since last call.
 * NOTE(review): the reset assignment is elided in this extract. */
3106 if (cur_addr_num >= num_preferred) {
3110 * select the nth address from the list (where cur_addr_num is the
3111 * nth) and 0 is the first one, 1 is the second one etc...
3113 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3115 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3116 dest_is_priv, cur_addr_num, fam, ro);
3118 /* if sctp_ifa is NULL something changed??, fall to plan b. */
3120 atomic_add_int(&sctp_ifa->refcount, 1);
3122 /* save off where the next one we will want */
3123 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3128 * plan_b: Look at all interfaces and find a preferred address. If
3129 * no preferred fall through to plan_c.
3132 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3133 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3134 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3135 sctp_ifn->ifn_name);
3136 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3137 /* wrong base scope */
3138 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3141 if ((sctp_ifn == looked_at) && looked_at) {
3142 /* already looked at this guy */
3143 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3146 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3147 dest_is_loop, dest_is_priv, fam);
3148 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3149 "Found ifn:%p %d preferred source addresses\n",
3150 ifn, num_preferred);
3151 if (num_preferred == 0) {
3152 /* None on this interface. */
3153 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3156 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3157 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3158 num_preferred, (void *)sctp_ifn, cur_addr_num);
3161 * Ok we have num_eligible_addr set with how many we can
3162 * use, this may vary from call to call due to addresses
3163 * being deprecated etc..
3165 if (cur_addr_num >= num_preferred) {
3168 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3169 dest_is_priv, cur_addr_num, fam, ro);
3173 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3174 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3176 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3177 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3178 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3179 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3181 atomic_add_int(&sifa->refcount, 1);
/* Retry entry point: second pass runs with ipv4_local_scope enabled. */
3185 again_with_private_addresses_allowed:
3187 /* plan_c: do we have an acceptable address on the emit interface */
3189 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3190 if (emit_ifn == NULL) {
3191 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3194 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3195 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3196 #if defined(__FreeBSD__) && !defined(__Userspace__)
3198 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3199 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3200 &sctp_ifa->address.sin.sin_addr) != 0)) {
3201 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3206 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3207 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3208 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3209 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3214 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3215 (non_asoc_addr_ok == 0)) {
3216 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3219 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3222 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3226 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3227 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3231 if (((non_asoc_addr_ok == 0) &&
3232 (sctp_is_addr_restricted(stcb, sifa))) ||
3233 (non_asoc_addr_ok &&
3234 (sctp_is_addr_restricted(stcb, sifa)) &&
3235 (!sctp_is_addr_pending(stcb, sifa)))) {
3237 * It is restricted for some
3238 * reason.. probably not yet added.
3240 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3245 atomic_add_int(&sifa->refcount, 1);
3250 * plan_d: We are in trouble. No preferred address on the emit
3251 * interface. And not even a preferred address on all interfaces.
3252 * Go out and see if we can find an acceptable address somewhere
3253 * amongst all interfaces.
3255 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3256 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3257 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3258 /* wrong base scope */
3261 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3262 #if defined(__FreeBSD__) && !defined(__Userspace__)
3264 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3265 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3266 &sctp_ifa->address.sin.sin_addr) != 0)) {
3271 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3272 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3273 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3278 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3279 (non_asoc_addr_ok == 0))
3281 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3287 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3291 if (((non_asoc_addr_ok == 0) &&
3292 (sctp_is_addr_restricted(stcb, sifa))) ||
3293 (non_asoc_addr_ok &&
3294 (sctp_is_addr_restricted(stcb, sifa)) &&
3295 (!sctp_is_addr_pending(stcb, sifa)))) {
3297 * It is restricted for some
3298 * reason.. probably not yet added.
/* Nothing found: allow IPv4 private addresses once, then give up and
 * restore the original scope setting on the second failure. */
3309 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3310 stcb->asoc.scope.ipv4_local_scope = 1;
3312 goto again_with_private_addresses_allowed;
3313 } else if (retried == 1) {
3314 stcb->asoc.scope.ipv4_local_scope = 0;
/* Final sweep after the retry: mark any other acceptable private IPv4
 * addresses as restricted so they are announced correctly to the peer. */
3322 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3323 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3324 /* wrong base scope */
3327 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3328 struct sctp_ifa *tmp_sifa;
3330 #if defined(__FreeBSD__) && !defined(__Userspace__)
3332 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3333 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3334 &sctp_ifa->address.sin.sin_addr) != 0)) {
3339 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3340 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3341 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3346 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3347 (non_asoc_addr_ok == 0))
3349 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3352 if (tmp_sifa == NULL) {
3355 if (tmp_sifa == sifa) {
3359 if (sctp_is_address_in_scope(tmp_sifa,
3360 &stcb->asoc.scope, 0) == 0) {
3363 if (((non_asoc_addr_ok == 0) &&
3364 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3365 (non_asoc_addr_ok &&
3366 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3367 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3369 * It is restricted for some
3370 * reason.. probably not yet added.
3375 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3376 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3377 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3382 atomic_add_int(&sifa->refcount, 1);
/*
 * Top-level source address selection: ensure the route is cached,
 * classify the destination (loopback / private / global) into
 * dest_is_loop / dest_is_priv, then dispatch to sctp_choose_boundall or
 * the bound-specific choosers under SCTP_IPI_ADDR_RLOCK.  Returns an
 * sctp_ifa with an added reference, or NULL (per the choosers) if no
 * usable source exists.  tcb (stcb) may be NULL; net may be NULL.
 * NOTE(review): this extract elides lines (braces, returns, some #else
 * arms); code is kept byte-identical to the source.
 */
3390 /* tcb may be NULL */
3392 sctp_source_address_selection(struct sctp_inpcb *inp,
3393 struct sctp_tcb *stcb,
3395 struct sctp_nets *net,
3396 int non_asoc_addr_ok, uint32_t vrf_id)
3398 struct sctp_ifa *answer;
3399 uint8_t dest_is_priv, dest_is_loop;
3402 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3405 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3410 * - Find the route if needed, cache if I can.
3411 * - Look at interface address in route, Is it in the bound list. If so we
3412 * have the best source.
3413 * - If not we must rotate amongst the addresses.
3417 * Do we need to pay attention to scope. We can have a private address
3418 * or a global address we are sourcing or sending to. So if we draw
3420 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3422 * ------------------------------------------
3423 * source * dest * result
3424 * -----------------------------------------
3425 * <a> Private * Global * NAT
3426 * -----------------------------------------
3427 * <b> Private * Private * No problem
3428 * -----------------------------------------
3429 * <c> Global * Private * Huh, How will this work?
3430 * -----------------------------------------
3431 * <d> Global * Global * No Problem
3432 *------------------------------------------
3433 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3435 *------------------------------------------
3436 * source * dest * result
3437 * -----------------------------------------
3438 * <a> Linklocal * Global *
3439 * -----------------------------------------
3440 * <b> Linklocal * Linklocal * No problem
3441 * -----------------------------------------
3442 * <c> Global * Linklocal * Huh, How will this work?
3443 * -----------------------------------------
3444 * <d> Global * Global * No Problem
3445 *------------------------------------------
3446 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3448 * And then we add to that what happens if there are multiple addresses
3449 * assigned to an interface. Remember the ifa on a ifn is a linked
3450 * list of addresses. So one interface can have more than one IP
3451 * address. What happens if we have both a private and a global
3452 * address? Do we then use context of destination to sort out which
3453 * one is best? And what about NAT's sending P->G may get you a NAT
3454 * translation, or should you select the G thats on the interface in
3459 * - count the number of addresses on the interface.
3460 * - if it is one, no problem except case <c>.
3461 * For <a> we will assume a NAT out there.
3462 * - if there are more than one, then we need to worry about scope P
3463 * or G. We should prefer G -> G and P -> P if possible.
3464 * Then as a secondary fall back to mixed types G->P being a last
3466 * - The above all works for bound all, but bound specific we need to
3467 * use the same concept but instead only consider the bound
3468 * addresses. If the bound set is NOT assigned to the interface then
3469 * we must use rotation amongst the bound addresses..
/* FreeBSD >= 13 caches a nexthop (ro_nh); older stacks cache a route
 * entry (ro_rt).  Allocate whichever is missing. */
3471 #if defined(__FreeBSD__) && !defined(__Userspace__)
3472 if (ro->ro_nh == NULL) {
3474 if (ro->ro_rt == NULL) {
3477 * Need a route to cache.
3479 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3481 #if defined(__FreeBSD__) && !defined(__Userspace__)
3482 if (ro->ro_nh == NULL) {
3484 if (ro->ro_rt == NULL) {
3489 /* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
3490 fam = (sa_family_t)ro->ro_dst.sa_family;
3492 fam = ro->ro_dst.sa_family;
3494 dest_is_priv = dest_is_loop = 0;
3495 /* Setup our scopes for the destination */
3499 /* Scope based on outbound address */
3500 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3503 /* mark it as local */
3504 net->addr_is_local = 1;
3506 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3513 /* Scope based on outbound address */
3515 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3517 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3518 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3521 * If the address is a loopback address, which
3522 * consists of "::1" OR "fe80::1%lo0", we are loopback
3523 * scope. But we don't use dest_is_priv (link local
3528 /* mark it as local */
3529 net->addr_is_local = 1;
3531 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3537 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3538 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
/* Address lists are protected by the global address read lock across
 * the chooser call; each branch releases it before returning. */
3539 SCTP_IPI_ADDR_RLOCK();
3540 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3544 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3545 dest_is_priv, dest_is_loop,
3546 non_asoc_addr_ok, fam);
3547 SCTP_IPI_ADDR_RUNLOCK();
3554 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3555 vrf_id, dest_is_priv,
3557 non_asoc_addr_ok, fam);
3559 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3564 SCTP_IPI_ADDR_RUNLOCK();
/*
 * Scan the control mbuf chain for a cmsg of type c_type at level
 * IPPROTO_SCTP and copy its payload into 'data' (cpsize bytes).  As a
 * compatibility shim, c_type == SCTP_SNDRCV also matches SCTP_SNDINFO /
 * SCTP_PRINFO / SCTP_AUTHINFO and synthesizes a struct sctp_sndrcvinfo
 * from whichever of those are present.  Every cmsg header length is
 * validated against the remaining control length before use, so
 * malformed (attacker-supplied) control data cannot cause over-reads.
 * NOTE(review): return statements and braces are elided in this
 * extract; code is kept byte-identical to the source.
 */
3569 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3576 struct sctp_sndinfo sndinfo;
3577 struct sctp_prinfo prinfo;
3578 struct sctp_authinfo authinfo;
3579 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3583 * Independent of how many mbufs, find the c_type inside the control
3584 * structure and copy out the data.
3587 tot_len = SCTP_BUF_LEN(control);
3588 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3589 rem_len = tot_len - off;
3590 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3591 /* There is not enough room for one more. */
3594 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3595 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3596 /* We dont't have a complete CMSG header. */
3599 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3600 /* We don't have the complete CMSG. */
3603 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3604 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3605 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3606 ((c_type == cmh.cmsg_type) ||
3607 ((c_type == SCTP_SNDRCV) &&
3608 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3609 (cmh.cmsg_type == SCTP_PRINFO) ||
3610 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3611 if (c_type == cmh.cmsg_type) {
3612 if (cpsize > INT_MAX) {
3615 if (cmsg_data_len < (int)cpsize) {
3618 /* It is exactly what we want. Copy it out. */
3619 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data)&#59;
3622 struct sctp_sndrcvinfo *sndrcvinfo;
3624 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3626 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3629 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3631 switch (cmh.cmsg_type) {
3633 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3636 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3637 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3638 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3639 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3640 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3641 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3644 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3647 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3648 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3649 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3651 sndrcvinfo->sinfo_timetolive = 0;
3653 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3656 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3659 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3660 sndrcvinfo->sinfo_keynumber_valid = 1;
3661 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
/*
 * Apply IPPROTO_SCTP-level cmsgs supplied with an implicit-connect send
 * to a freshly created association: SCTP_INIT overrides stream counts /
 * retransmit limits (growing asoc.strmout if pre_open_streams exceeds
 * the current allocation), and SCTP_DSTADDRV4 / SCTP_DSTADDRV6 add
 * extra remote addresses.  On any malformed cmsg or failed address add,
 * *error is set and (per the elided returns) processing aborts.
 * All cmsg lengths are validated against the control mbuf before use.
 * NOTE(review): returns, braces and case labels (e.g. SCTP_INIT) are
 * partially elided in this extract; code kept byte-identical.
 */
3674 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3681 struct sctp_initmsg initmsg;
3683 struct sockaddr_in sin;
3686 struct sockaddr_in6 sin6;
3688 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3690 tot_len = SCTP_BUF_LEN(control);
3691 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3692 rem_len = tot_len - off;
3693 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3694 /* There is not enough room for one more. */
3698 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3699 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3700 /* We dont't have a complete CMSG header. */
3704 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3705 /* We don't have the complete CMSG. */
3709 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3710 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3711 if (cmh.cmsg_level == IPPROTO_SCTP) {
3712 switch (cmh.cmsg_type) {
3714 if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3718 m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
/* Zero fields mean "keep the endpoint default". */
3719 if (initmsg.sinit_max_attempts)
3720 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3721 if (initmsg.sinit_num_ostreams)
3722 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3723 if (initmsg.sinit_max_instreams)
3724 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3725 if (initmsg.sinit_max_init_timeo)
3726 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3727 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3728 struct sctp_stream_out *tmp_str;
3730 #if defined(SCTP_DETAILED_STR_STATS)
3734 /* Default is NOT correct */
3735 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3736 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
/* Drop the TCB lock around the (possibly sleeping) allocation,
 * then re-acquire before touching the association again. */
3737 SCTP_TCB_UNLOCK(stcb);
3738 SCTP_MALLOC(tmp_str,
3739 struct sctp_stream_out *,
3740 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3742 SCTP_TCB_LOCK(stcb);
3743 if (tmp_str != NULL) {
3744 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3745 stcb->asoc.strmout = tmp_str;
3746 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3748 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3750 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3751 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3752 stcb->asoc.strmout[i].chunks_on_queues = 0;
3753 stcb->asoc.strmout[i].next_mid_ordered = 0;
3754 stcb->asoc.strmout[i].next_mid_unordered = 0;
3755 #if defined(SCTP_DETAILED_STR_STATS)
3756 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3757 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3758 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3761 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3762 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3764 stcb->asoc.strmout[i].sid = i;
3765 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3766 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3767 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3772 case SCTP_DSTADDRV4:
3773 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3777 memset(&sin, 0, sizeof(struct sockaddr_in));
3778 sin.sin_family = AF_INET;
3780 sin.sin_len = sizeof(struct sockaddr_in);
3782 sin.sin_port = stcb->rport;
3783 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
/* Reject wildcard, broadcast and multicast destinations. */
3784 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3785 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3786 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3790 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3791 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3798 case SCTP_DSTADDRV6:
3799 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3803 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3804 sin6.sin6_family = AF_INET6;
3805 #ifdef HAVE_SIN6_LEN
3806 sin6.sin6_len = sizeof(struct sockaddr_in6);
3808 sin6.sin6_port = stcb->rport;
3809 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3810 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3811 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
/* V4-mapped addresses are converted and added as plain IPv4. */
3816 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3817 in6_sin6_2_sin(&sin, &sin6);
3818 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3819 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3820 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3824 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3825 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3831 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3832 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
/*
 * Look up an existing association using a destination address carried
 * in an SCTP_DSTADDRV4 / SCTP_DSTADDRV6 cmsg (implicit connect path).
 * Each candidate address is validated (length, family, not wildcard /
 * multicast per the elided checks) and passed to
 * sctp_findassociation_ep_addr(); v4-mapped IPv6 addresses are folded
 * to IPv4 first.  Returns the found stcb, or NULL (elided) if no cmsg
 * matches.  cmsg header lengths are bounds-checked as in
 * sctp_find_cmsg().
 * NOTE(review): this extract elides lines (returns, braces, the
 * function's leading error checks); code kept byte-identical.
 */
3846 #if defined(INET) || defined(INET6)
3847 static struct sctp_tcb *
3848 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3850 struct mbuf *control,
3851 struct sctp_nets **net_p,
3859 struct sctp_tcb *stcb;
3860 struct sockaddr *addr;
3862 struct sockaddr_in sin;
3865 struct sockaddr_in6 sin6;
3867 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3869 tot_len = SCTP_BUF_LEN(control);
3870 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3871 rem_len = tot_len - off;
3872 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3873 /* There is not enough room for one more. */
3877 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3878 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3879 /* We dont't have a complete CMSG header. */
3883 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3884 /* We don't have the complete CMSG. */
3888 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3889 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3890 if (cmh.cmsg_level == IPPROTO_SCTP) {
3891 switch (cmh.cmsg_type) {
3893 case SCTP_DSTADDRV4:
3894 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3898 memset(&sin, 0, sizeof(struct sockaddr_in));
3899 sin.sin_family = AF_INET;
3901 sin.sin_len = sizeof(struct sockaddr_in);
3903 sin.sin_port = port;
3904 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3905 addr = (struct sockaddr *)&sin;
3909 case SCTP_DSTADDRV6:
3910 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3914 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3915 sin6.sin6_family = AF_INET6;
3916 #ifdef HAVE_SIN6_LEN
3917 sin6.sin6_len = sizeof(struct sockaddr_in6);
3919 sin6.sin6_port = port;
3920 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3922 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3923 in6_sin6_2_sin(&sin, &sin6);
3924 addr = (struct sockaddr *)&sin;
3927 addr = (struct sockaddr *)&sin6;
3935 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
/*
 * Build the STATE-COOKIE parameter mbuf chain:
 *   [paramhdr | state cookie struct] -> copy of received INIT
 *   -> copy of our INIT-ACK -> zeroed SCTP_SIGNATURE_SIZE block.
 * The signature area is zero-filled here; the caller computes the HMAC
 * later via *signature, which points into the last mbuf.  The param
 * length is patched once the total chain size is known.  Returns the
 * head mbuf, or NULL (elided) on allocation failure — intermediate
 * copies are freed on the failure paths.
 * NOTE(review): some lines (returns, braces, sctp_m_freem on the last
 * failure path) are elided in this extract; code kept byte-identical.
 */
3946 static struct mbuf *
3947 sctp_add_cookie(struct mbuf *init, int init_offset,
3948 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3950 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3951 struct sctp_state_cookie *stc;
3952 struct sctp_paramhdr *ph;
3955 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3956 sizeof(struct sctp_paramhdr)), 0,
3957 M_NOWAIT, 1, MT_DATA);
3961 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3962 if (copy_init == NULL) {
3966 #ifdef SCTP_MBUF_LOGGING
3967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3968 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3971 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3973 if (copy_initack == NULL) {
3975 sctp_m_freem(copy_init);
3978 #ifdef SCTP_MBUF_LOGGING
3979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3980 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3983 /* easy side we just drop it on the end */
3984 ph = mtod(mret, struct sctp_paramhdr *);
3985 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3986 sizeof(struct sctp_paramhdr);
3987 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3988 sizeof(struct sctp_paramhdr));
3989 ph->param_type = htons(SCTP_STATE_COOKIE);
3990 ph->param_length = 0; /* fill in at the end */
3991 /* Fill in the stc cookie data */
3992 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3994 /* tack the INIT and then the INIT-ACK onto the chain */
/* Walk each segment to total its length and splice the next piece
 * onto the tail of the chain. */
3996 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3997 cookie_sz += SCTP_BUF_LEN(m_at);
3998 if (SCTP_BUF_NEXT(m_at) == NULL) {
3999 SCTP_BUF_NEXT(m_at) = copy_init;
4003 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4004 cookie_sz += SCTP_BUF_LEN(m_at);
4005 if (SCTP_BUF_NEXT(m_at) == NULL) {
4006 SCTP_BUF_NEXT(m_at) = copy_initack;
4010 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4011 cookie_sz += SCTP_BUF_LEN(m_at);
4012 if (SCTP_BUF_NEXT(m_at) == NULL) {
4016 sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
4018 /* no space, so free the entire chain */
4022 SCTP_BUF_NEXT(m_at) = sig;
4023 SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE;
4024 cookie_sz += SCTP_SIGNATURE_SIZE;
4025 ph->param_length = htons(cookie_sz);
4026 *signature = (uint8_t *)mtod(sig, caddr_t);
4027 memset(*signature, 0, SCTP_SIGNATURE_SIZE);
/*
 * Return the ECT(0) codepoint to OR into the IP TOS / traffic-class
 * field when the association negotiated ECN; stcb may be NULL.
 * NOTE(review): the non-ECN branch (presumably "return 0") is elided
 * in this extract; code kept byte-identical.
 */
4032 sctp_get_ect(struct sctp_tcb *stcb)
4034 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4035 return (SCTP_ECT0_BIT);
/*
 * React to a send that failed because no route / no valid source
 * address exists for 'net': notify the ULP that the interface is down,
 * clear REACHABLE/PF state, and — if net was the primary destination —
 * install an alternate path and drop the cached source address so it
 * is re-selected on the next send.  stcb may be NULL (checked before
 * the notify / primary-path handling).
 * NOTE(review): braces and some guard lines are elided in this
 * extract; code kept byte-identical.
 */
4041 #if defined(INET) || defined(INET6)
4043 sctp_handle_no_route(struct sctp_tcb *stcb,
4044 struct sctp_nets *net,
4047 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4050 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4051 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4052 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4053 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4054 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4055 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4059 net->dest_state &= ~SCTP_ADDR_REACHABLE;
4060 net->dest_state &= ~SCTP_ADDR_PF;
4064 if (net == stcb->asoc.primary_destination) {
4065 /* need a new primary */
4066 struct sctp_nets *alt;
4068 alt = sctp_find_alternate_net(stcb, net, 0);
/* Swap in the alternate (taking a reference) and invalidate the
 * cached source address so it is re-chosen for the new path. */
4070 if (stcb->asoc.alternate) {
4071 sctp_free_remote_addr(stcb->asoc.alternate);
4073 stcb->asoc.alternate = alt;
4074 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4075 if (net->ro._s_addr) {
4076 sctp_free_ifa(net->ro._s_addr);
4077 net->ro._s_addr = NULL;
4079 net->src_addr_selected = 0;
/*
 * sctp_lowlevel_chunk_output(): finish and transmit one SCTP packet.
 *
 * NOTE(review): this file is an elided, line-numbered listing of
 * FreeBSD's sctp_output.c -- the leading "4xxx"/"5xxx" figures are the
 * original source line numbers, and many lines (alternate #else/#endif
 * branches, closing braces, some declarations) are absent from this
 * view.  The comments added below annotate only what the visible lines
 * show; anything inferred across an elision is marked as such.
 */
4088 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4089 struct sctp_tcb *stcb, /* may be NULL */
4090 struct sctp_nets *net,
4091 struct sockaddr *to,
4093 uint32_t auth_offset,
4094 struct sctp_auth_chunk *auth,
4095 uint16_t auth_keyid,
4096 int nofragment_flag,
4103 union sctp_sockstore *over_addr,
4104 #if defined(__FreeBSD__) && !defined(__Userspace__)
4105 uint8_t mflowtype, uint32_t mflowid,
4108 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4111 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4112 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4113 * - fill in the HMAC digest of any AUTH chunk in the packet.
4114 * - calculate and fill in the SCTP checksum.
4115 * - prepend an IP address header.
4116 * - if boundall use INADDR_ANY.
4117 * - if boundspecific do source address selection.
4118 * - set fragmentation option for ipV4.
4119 * - On return from IP output, check/adjust mtu size of output
4120 * interface and smallest_mtu size as well.
4122 /* Will need ifdefs around this */
4124 struct sctphdr *sctphdr;
4127 #if defined(INET) || defined(INET6)
4130 #if defined(INET) || defined(INET6)
4132 sctp_route_t *ro = NULL;
4133 struct udphdr *udp = NULL;
4136 #if defined(__APPLE__) && !defined(__Userspace__)
4137 struct socket *so = NULL;
/* Lock discipline (Apple builds only): assert socket + TCB locks held. */
4140 #if defined(__APPLE__) && !defined(__Userspace__)
4142 sctp_lock_assert(SCTP_INP_SO(inp));
4143 SCTP_TCB_LOCK_ASSERT(stcb);
4145 sctp_unlock_assert(SCTP_INP_SO(inp));
/*
 * Destination marked out-of-scope: log EFAULT and bail out (the
 * actual return statement is elided from this listing).
 */
4148 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4149 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
/*
 * VRF id: taken from the association when one exists, otherwise the
 * endpoint default (the guarding if/else lines are elided here).
 */
4153 #if defined(INET) || defined(INET6)
4155 vrf_id = stcb->asoc.vrf_id;
4157 vrf_id = inp->def_vrf_id;
4160 /* fill in the HMAC digest for any AUTH chunk in the packet */
4161 if ((auth != NULL) && (stcb != NULL)) {
4162 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
/*
 * DSCP selection: the three assignments suggest per-net overrides
 * assoc default overrides endpoint default -- the if/else ladder is
 * elided, so confirm against upstream sctp_output.c.
 */
4166 tos_value = net->dscp;
4168 tos_value = stcb->asoc.default_dscp;
4170 tos_value = inp->sctp_ep.default_dscp;
/* Per-address-family emission paths follow (case labels are elided). */
4173 switch (to->sa_family) {
4177 struct ip *ip = NULL;
4178 sctp_route_t iproute;
/* IPv4: reserve header room (plus UDP header when encapsulating). */
4181 len = SCTP_MIN_V4_OVERHEAD;
4183 len += sizeof(struct udphdr);
4185 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4188 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4191 SCTP_ALIGN_TO_END(newm, len);
4192 SCTP_BUF_LEN(newm) = len;
4193 SCTP_BUF_NEXT(newm) = m;
/* Tag the mbuf with the flow id/type (from net when known, else args). */
4195 #if defined(__FreeBSD__) && !defined(__Userspace__)
4197 m->m_pkthdr.flowid = net->flowid;
4198 M_HASHTYPE_SET(m, net->flowtype);
4200 m->m_pkthdr.flowid = mflowid;
4201 M_HASHTYPE_SET(m, mflowtype);
4204 packet_length = sctp_calculate_len(m);
4205 ip = mtod(m, struct ip *);
4206 ip->ip_v = IPVERSION;
4207 ip->ip_hl = (sizeof(struct ip) >> 2);
4208 if (tos_value == 0) {
4210 * This means especially, that it is not set at the
4211 * SCTP layer. So use the value from the IP layer.
4213 tos_value = inp->ip_inp.inp.inp_ip_tos;
4217 tos_value |= sctp_get_ect(stcb);
/* DF bit only for plain SCTP (port == 0); encapsulated packets may fragment. */
4219 if ((nofragment_flag) && (port == 0)) {
4220 #if defined(__FreeBSD__) && !defined(__Userspace__)
4221 ip->ip_off = htons(IP_DF);
4222 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
4225 ip->ip_off = htons(IP_DF);
4228 #if defined(__FreeBSD__) && !defined(__Userspace__)
4229 ip->ip_off = htons(0);
/* ip_id generation is platform specific (FreeBSD's own helper line is elided). */
4234 #if defined(__Userspace__)
4235 ip->ip_id = htons(SCTP_IP_ID(inp)++);
4236 #elif defined(__FreeBSD__)
4237 /* FreeBSD has a function for ip_id's */
4239 #elif defined(__APPLE__)
4241 ip->ip_id = ip_randomid();
4243 ip->ip_id = htons(ip_id++);
4246 ip->ip_id = SCTP_IP_ID(inp)++;
4249 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4250 #if defined(__FreeBSD__) && !defined(__Userspace__)
4251 ip->ip_len = htons(packet_length);
4253 ip->ip_len = packet_length;
4255 ip->ip_tos = tos_value;
4257 ip->ip_p = IPPROTO_UDP;
4259 ip->ip_p = IPPROTO_SCTP;
/* Route: stack-local iproute when no net is given, else net's cached route. */
4264 memset(&iproute, 0, sizeof(iproute));
4266 memcpy(&ro->ro_dst, to, to->sa_len);
4268 memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4271 ro = (sctp_route_t *)&net->ro;
4273 /* Now the address selection part */
4274 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4276 /* call the routine to select the src address */
4277 if (net && out_of_asoc_ok == 0) {
/* Drop a cached source address whose ifa is being deleted or unusable. */
4278 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4279 sctp_free_ifa(net->ro._s_addr);
4280 net->ro._s_addr = NULL;
4281 net->src_addr_selected = 0;
4282 #if defined(__FreeBSD__) && !defined(__Userspace__)
4291 if (net->src_addr_selected == 0) {
4292 /* Cache the source address */
4293 net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4296 net->src_addr_selected = 1;
4298 if (net->ro._s_addr == NULL) {
4299 /* No route to host */
4300 net->src_addr_selected = 0;
4301 sctp_handle_no_route(stcb, net, so_locked);
4302 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4304 return (EHOSTUNREACH);
4306 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
/* No net: pick a source now (or use the caller-supplied over_addr). */
4308 if (over_addr == NULL) {
4309 struct sctp_ifa *_lsrc;
4311 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4315 if (_lsrc == NULL) {
4316 sctp_handle_no_route(stcb, net, so_locked);
4317 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4319 return (EHOSTUNREACH);
4321 ip->ip_src = _lsrc->address.sin.sin_addr;
4322 sctp_free_ifa(_lsrc);
4324 ip->ip_src = over_addr->sin.sin_addr;
4325 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
/*
 * port != 0 selects UDP encapsulation (RFC 6951 style); refuse when the
 * tunneling port sysctl is unset.  NOTE(review): the htons() is
 * redundant for a compare-against-zero but harmless.
 */
4329 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4330 sctp_handle_no_route(stcb, net, so_locked);
4331 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4333 return (EHOSTUNREACH);
4335 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4336 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4337 udp->uh_dport = port;
4338 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4339 #if !defined(__Userspace__)
4340 #if defined(__FreeBSD__)
4342 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4347 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
/* SCTP common header sits after the UDP header (encap) or the IP header. */
4352 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4354 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4357 sctphdr->src_port = src_port;
4358 sctphdr->dest_port = dest_port;
4359 sctphdr->v_tag = v_tag;
4360 sctphdr->checksum = 0;
4363 * If source address selection fails and we find no route
4364 * then the ip_output should fail as well with a
4365 * NO_ROUTE_TO_HOST type error. We probably should catch
4366 * that somewhere and abort the association right away
4367 * (assuming this is an INIT being sent).
4369 #if defined(__FreeBSD__) && !defined(__Userspace__)
4370 if (ro->ro_nh == NULL) {
4372 if (ro->ro_rt == NULL) {
4375 * src addr selection failed to find a route (or
4376 * valid source addr), so we can't get there from
4379 sctp_handle_no_route(stcb, net, so_locked);
4380 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4382 return (EHOSTUNREACH);
4384 if (ro != &iproute) {
4385 memcpy(&iproute, ro, sizeof(*ro));
4387 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4388 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4389 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4390 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4391 #if defined(__FreeBSD__) && !defined(__Userspace__)
4392 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4395 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4399 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4400 /* failed to prepend data, give up */
4401 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4405 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
/*
 * CRC32c: computed in software for UDP-encapsulated packets; on
 * FreeBSD the non-encapsulated path defers to hardware offload via
 * csum_flags, while other builds compute it in software unless the
 * loopback-suppression sysctl applies.
 */
4407 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4408 SCTP_STAT_INCR(sctps_sendswcrc);
4409 #if !defined(__Userspace__)
4410 #if defined(__FreeBSD__)
4412 SCTP_ENABLE_UDP_CSUM(o_pak);
4415 SCTP_ENABLE_UDP_CSUM(o_pak);
4419 #if defined(__FreeBSD__) && !defined(__Userspace__)
4420 m->m_pkthdr.csum_flags = CSUM_SCTP;
4421 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4422 SCTP_STAT_INCR(sctps_sendhwcrc);
4424 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4425 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4426 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4427 SCTP_STAT_INCR(sctps_sendswcrc);
4429 SCTP_STAT_INCR(sctps_sendhwcrc);
4433 #ifdef SCTP_PACKET_LOGGING
4434 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4435 sctp_packet_log(o_pak);
4437 /* send it out. table id is taken from stcb */
/*
 * Apple unlocked-output mode: drop the socket lock around ip_output,
 * holding a TCB refcount so the association cannot vanish meanwhile.
 */
4438 #if defined(__APPLE__) && !defined(__Userspace__)
4439 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4440 so = SCTP_INP_SO(inp);
4441 SCTP_SOCKET_UNLOCK(so, 0);
4444 #if defined(__FreeBSD__) && !defined(__Userspace__)
4445 SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4447 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4448 #if defined(__APPLE__) && !defined(__Userspace__)
4449 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4450 atomic_add_int(&stcb->asoc.refcnt, 1);
4451 SCTP_TCB_UNLOCK(stcb);
4452 SCTP_SOCKET_LOCK(so, 0);
4453 SCTP_TCB_LOCK(stcb);
4454 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4457 #if defined(__FreeBSD__) && !defined(__Userspace__)
4459 UDPSTAT_INC(udps_opackets);
4462 SCTP_STAT_INCR(sctps_sendpackets);
4463 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4465 SCTP_STAT_INCR(sctps_senderrors);
4467 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4469 /* free tempy routes */
4470 #if defined(__FreeBSD__) && !defined(__Userspace__)
/*
 * Post-send PMTU bookkeeping: if the route reports a smaller MTU than
 * net->mtu, shrink it (minus UDP overhead when encapsulating) and, if
 * needed, the association's smallest_mtu.
 */
4479 #if defined(__FreeBSD__) && !defined(__Userspace__)
4480 if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4482 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4484 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4487 #if defined(__FreeBSD__) && !defined(__Userspace__)
4488 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4490 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4494 mtu -= sizeof(struct udphdr);
4496 if (mtu < net->mtu) {
4497 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4498 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4503 #if defined(__FreeBSD__) && !defined(__Userspace__)
4504 } else if (ro->ro_nh == NULL) {
4506 } else if (ro->ro_rt == NULL) {
4508 /* route was freed */
4509 if (net->ro._s_addr &&
4510 net->src_addr_selected) {
4511 sctp_free_ifa(net->ro._s_addr);
4512 net->ro._s_addr = NULL;
4514 net->src_addr_selected = 0;
/*
 * AF_INET6 path (its case label is elided): same overall shape as the
 * IPv4 path, plus flowlabel/flowinfo handling and KAME scope
 * embedding/recovery around source-address selection.
 */
4523 uint32_t flowlabel, flowinfo;
4524 struct ip6_hdr *ip6h;
4525 struct route_in6 ip6route;
4526 #if !defined(__Userspace__)
4529 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4531 #ifdef SCTP_EMBEDDED_V6_SCOPE
4532 struct sockaddr_in6 lsa6_storage;
4535 u_short prev_port = 0;
/* Flowlabel precedence mirrors DSCP above: net, assoc, endpoint (guards elided). */
4539 flowlabel = net->flowlabel;
4541 flowlabel = stcb->asoc.default_flowlabel;
4543 flowlabel = inp->sctp_ep.default_flowlabel;
4545 if (flowlabel == 0) {
4547 * This means especially, that it is not set at the
4548 * SCTP layer. So use the value from the IP layer.
4550 #if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4551 flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4553 flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4556 flowlabel &= 0x000fffff;
4557 len = SCTP_MIN_OVERHEAD;
4559 len += sizeof(struct udphdr);
4561 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4564 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4567 SCTP_ALIGN_TO_END(newm, len);
4568 SCTP_BUF_LEN(newm) = len;
4569 SCTP_BUF_NEXT(newm) = m;
4571 #if defined(__FreeBSD__) && !defined(__Userspace__)
4573 m->m_pkthdr.flowid = net->flowid;
4574 M_HASHTYPE_SET(m, net->flowtype);
4576 m->m_pkthdr.flowid = mflowid;
4577 M_HASHTYPE_SET(m, mflowtype);
4580 packet_length = sctp_calculate_len(m);
4582 ip6h = mtod(m, struct ip6_hdr *);
4583 /* protect *sin6 from overwrite */
4584 sin6 = (struct sockaddr_in6 *)to;
4588 #ifdef SCTP_EMBEDDED_V6_SCOPE
4589 /* KAME hack: embed scopeid */
4590 #if defined(__APPLE__) && !defined(__Userspace__)
4591 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4592 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4594 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4596 #elif defined(SCTP_KAME)
4597 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4599 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4602 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4606 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4608 memset(&ip6route, 0, sizeof(ip6route));
4609 ro = (sctp_route_t *)&ip6route;
4610 #ifdef HAVE_SIN6_LEN
4611 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4613 memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4616 ro = (sctp_route_t *)&net->ro;
4619 * We assume here that inp_flow is in host byte order within
4622 if (tos_value == 0) {
4624 * This means especially, that it is not set at the
4625 * SCTP layer. So use the value from the IP layer.
4627 #if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4628 tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4630 tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4635 tos_value |= sctp_get_ect(stcb);
/* Compose ip6_flow from version bits (elided), traffic class, and flowlabel. */
4639 flowinfo |= tos_value;
4641 flowinfo |= flowlabel;
4642 ip6h->ip6_flow = htonl(flowinfo);
4644 ip6h->ip6_nxt = IPPROTO_UDP;
4646 ip6h->ip6_nxt = IPPROTO_SCTP;
4648 ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4649 ip6h->ip6_dst = sin6->sin6_addr;
4652 * Add SRC address selection here: we can only reuse to a
4653 * limited degree the kame src-addr-sel, since we can try
4654 * their selection but it may not be bound.
4656 memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4657 lsa6_tmp.sin6_family = AF_INET6;
4658 #ifdef HAVE_SIN6_LEN
4659 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4662 if (net && out_of_asoc_ok == 0) {
/* As in the IPv4 path: invalidate a cached source ifa that is going away. */
4663 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4664 sctp_free_ifa(net->ro._s_addr);
4665 net->ro._s_addr = NULL;
4666 net->src_addr_selected = 0;
4667 #if defined(__FreeBSD__) && !defined(__Userspace__)
4676 if (net->src_addr_selected == 0) {
4677 #ifdef SCTP_EMBEDDED_V6_SCOPE
4678 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4679 /* KAME hack: embed scopeid */
4680 #if defined(__APPLE__) && !defined(__Userspace__)
4681 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4682 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4684 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4686 #elif defined(SCTP_KAME)
4687 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4689 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4692 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4696 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4697 /* Cache the source address */
4698 net->ro._s_addr = sctp_source_address_selection(inp,
4704 #ifdef SCTP_EMBEDDED_V6_SCOPE
4706 (void)sa6_recoverscope(sin6);
4708 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4709 #endif /* SCTP_KAME */
4710 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4711 net->src_addr_selected = 1;
4713 if (net->ro._s_addr == NULL) {
4714 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4715 net->src_addr_selected = 0;
4716 sctp_handle_no_route(stcb, net, so_locked);
4717 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4719 return (EHOSTUNREACH);
4721 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4723 #ifdef SCTP_EMBEDDED_V6_SCOPE
4724 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4725 /* KAME hack: embed scopeid */
4726 #if defined(__APPLE__) && !defined(__Userspace__)
4727 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4728 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4730 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4732 #elif defined(SCTP_KAME)
4733 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4735 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4738 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4742 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4743 if (over_addr == NULL) {
4744 struct sctp_ifa *_lsrc;
4746 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4750 if (_lsrc == NULL) {
4751 sctp_handle_no_route(stcb, net, so_locked);
4752 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4754 return (EHOSTUNREACH);
4756 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4757 sctp_free_ifa(_lsrc);
4759 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4760 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4762 #ifdef SCTP_EMBEDDED_V6_SCOPE
4764 (void)sa6_recoverscope(sin6);
4766 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4767 #endif /* SCTP_KAME */
4768 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4770 lsa6->sin6_port = inp->sctp_lport;
4772 #if defined(__FreeBSD__) && !defined(__Userspace__)
4773 if (ro->ro_nh == NULL) {
4775 if (ro->ro_rt == NULL) {
4778 * src addr selection failed to find a route (or
4779 * valid source addr), so we can't get there from
4782 sctp_handle_no_route(stcb, net, so_locked);
4783 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4785 return (EHOSTUNREACH);
4787 #ifndef SCOPEDROUTING
4788 #ifdef SCTP_EMBEDDED_V6_SCOPE
4790 * XXX: sa6 may not have a valid sin6_scope_id in the
4791 * non-SCOPEDROUTING case.
4793 memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4794 lsa6_storage.sin6_family = AF_INET6;
4795 #ifdef HAVE_SIN6_LEN
4796 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4799 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4800 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4802 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4804 #endif /* SCTP_KAME */
4805 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4810 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4811 lsa6_storage.sin6_port = inp->sctp_lport;
4812 lsa6 = &lsa6_storage;
4813 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4814 #endif /* SCOPEDROUTING */
4815 ip6h->ip6_src = lsa6->sin6_addr;
/* UDP encapsulation for IPv6, same gate as the IPv4 path. */
4818 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4819 sctp_handle_no_route(stcb, net, so_locked);
4820 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4822 return (EHOSTUNREACH);
4824 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4825 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4826 udp->uh_dport = port;
4827 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4829 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4831 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4834 sctphdr->src_port = src_port;
4835 sctphdr->dest_port = dest_port;
4836 sctphdr->v_tag = v_tag;
4837 sctphdr->checksum = 0;
4840 * We set the hop limit now since there is a good chance
4841 * that our ro pointer is now filled
4843 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4844 #if !defined(__Userspace__)
4845 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4849 /* Copy to be sure something bad is not happening */
4850 sin6->sin6_addr = ip6h->ip6_dst;
4851 lsa6->sin6_addr = ip6h->ip6_src;
4854 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4855 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4856 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4857 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4858 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4860 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4861 /* preserve the port and scope for link local send */
4862 prev_scope = sin6->sin6_scope_id;
4863 prev_port = sin6->sin6_port;
4866 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4867 /* failed to prepend data, give up */
4869 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4872 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
/* IPv6 checksum handling mirrors the IPv4 block above (CRC32c + UDP sum). */
4874 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4875 SCTP_STAT_INCR(sctps_sendswcrc);
4876 #if !defined(__Userspace__)
4880 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4881 udp->uh_sum = 0xffff;
4886 #if defined(__FreeBSD__) && !defined(__Userspace__)
4887 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4888 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4889 SCTP_STAT_INCR(sctps_sendhwcrc);
4891 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4892 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4893 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4894 SCTP_STAT_INCR(sctps_sendswcrc);
4896 SCTP_STAT_INCR(sctps_sendhwcrc);
4900 /* send it out. table id is taken from stcb */
4901 #if defined(__APPLE__) && !defined(__Userspace__)
4902 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4903 so = SCTP_INP_SO(inp);
4904 SCTP_SOCKET_UNLOCK(so, 0);
4907 #ifdef SCTP_PACKET_LOGGING
4908 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4909 sctp_packet_log(o_pak);
4911 #if !defined(__Userspace__)
4912 #if defined(__FreeBSD__)
4913 SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4915 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4917 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4919 #if defined(__APPLE__) && !defined(__Userspace__)
4920 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4921 atomic_add_int(&stcb->asoc.refcnt, 1);
4922 SCTP_TCB_UNLOCK(stcb);
4923 SCTP_SOCKET_LOCK(so, 0);
4924 SCTP_TCB_LOCK(stcb);
4925 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4929 /* for link local this must be done */
4930 sin6->sin6_scope_id = prev_scope;
4931 sin6->sin6_port = prev_port;
4933 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4934 #if defined(__FreeBSD__) && !defined(__Userspace__)
4936 UDPSTAT_INC(udps_opackets);
4939 SCTP_STAT_INCR(sctps_sendpackets);
4940 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4942 SCTP_STAT_INCR(sctps_senderrors);
4945 /* Now if we had a temp route free it */
4946 #if defined(__FreeBSD__) && !defined(__Userspace__)
4955 /* PMTU check versus smallest asoc MTU goes here */
4956 #if defined(__FreeBSD__) && !defined(__Userspace__)
4957 if (ro->ro_nh == NULL) {
4959 if (ro->ro_rt == NULL) {
4961 /* Route was freed */
4962 if (net->ro._s_addr &&
4963 net->src_addr_selected) {
4964 sctp_free_ifa(net->ro._s_addr);
4965 net->ro._s_addr = NULL;
4967 net->src_addr_selected = 0;
4969 #if defined(__FreeBSD__) && !defined(__Userspace__)
4970 if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4972 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4974 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4977 #if defined(__FreeBSD__) && !defined(__Userspace__)
4978 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4980 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4984 mtu -= sizeof(struct udphdr);
4986 if (mtu < net->mtu) {
4987 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4988 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
/* Fallback: clamp smallest_mtu to the outgoing interface's link MTU. */
4994 #if !defined(__Userspace__)
4997 #define ND_IFINFO(ifp) (ifp)
4998 #define linkmtu if_mtu
5000 if (ND_IFINFO(ifp)->linkmtu &&
5001 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
5002 sctp_mtu_size_reset(inp,
5004 ND_IFINFO(ifp)->linkmtu);
/*
 * Userspace AF_CONN path: bare SCTP common header (no IP header),
 * CRC32c computed in software unless crc32c_offloaded is set, then the
 * flattened packet is handed to the registered conn_output callback.
 */
5012 #if defined(__Userspace__)
5016 struct sockaddr_conn *sconn;
5019 sconn = (struct sockaddr_conn *)to;
5020 len = sizeof(struct sctphdr);
5021 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
5024 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5027 SCTP_ALIGN_TO_END(newm, len);
5028 SCTP_BUF_LEN(newm) = len;
5029 SCTP_BUF_NEXT(newm) = m;
5031 packet_length = sctp_calculate_len(m);
5032 sctphdr = mtod(m, struct sctphdr *);
5033 sctphdr->src_port = src_port;
5034 sctphdr->dest_port = dest_port;
5035 sctphdr->v_tag = v_tag;
5036 sctphdr->checksum = 0;
5037 if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
5038 sctphdr->checksum = sctp_calculate_cksum(m, 0);
5039 SCTP_STAT_INCR(sctps_sendswcrc);
5041 SCTP_STAT_INCR(sctps_sendhwcrc);
5043 if (tos_value == 0) {
5044 tos_value = inp->ip_inp.inp.inp_ip_tos;
5048 tos_value |= sctp_get_ect(stcb);
5050 /* Don't alloc/free for each packet */
5051 if ((buffer = malloc(packet_length)) != NULL) {
5052 m_copydata(m, 0, packet_length, buffer);
5053 ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
/* Unknown address family: "this should not happen" -- report EFAULT. */
5063 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5064 ((struct sockaddr *)to)->sa_family);
5066 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5073 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
5075 struct mbuf *m, *m_last;
5076 struct sctp_nets *net;
5077 struct sctp_init_chunk *init;
5078 struct sctp_supported_addr_param *sup_addr;
5079 struct sctp_adaptation_layer_indication *ali;
5080 struct sctp_supported_chunk_types_param *pr_supported;
5081 struct sctp_paramhdr *ph;
5082 int cnt_inits_to = 0;
5084 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5086 #if defined(__APPLE__) && !defined(__Userspace__)
5088 sctp_lock_assert(SCTP_INP_SO(inp));
5090 sctp_unlock_assert(SCTP_INP_SO(inp));
5093 /* INIT's always go to the primary (and usually ONLY address) */
5094 net = stcb->asoc.primary_destination;
5096 net = TAILQ_FIRST(&stcb->asoc.nets);
5101 /* we confirm any address we send an INIT to */
5102 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5103 (void)sctp_set_primary_addr(stcb, NULL, net);
5105 /* we confirm any address we send an INIT to */
5106 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5108 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5110 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5112 * special hook, if we are sending to link local it will not
5113 * show up in our private address count.
5115 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5119 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5120 /* This case should not happen */
5121 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5124 /* start the INIT timer */
5125 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5127 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5129 /* No memory, INIT timer will re-attempt. */
5130 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5133 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5135 /* Now lets put the chunk header in place */
5136 init = mtod(m, struct sctp_init_chunk *);
5137 /* now the chunk header */
5138 init->ch.chunk_type = SCTP_INITIATION;
5139 init->ch.chunk_flags = 0;
5140 /* fill in later from mbuf we build */
5141 init->ch.chunk_length = 0;
5142 /* place in my tag */
5143 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5144 /* set up some of the credits. */
5145 init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5146 SCTP_MINIMAL_RWND));
5147 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5148 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5149 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5151 /* Adaptation layer indication parameter */
5152 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5153 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5154 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5155 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5156 ali->ph.param_length = htons(parameter_len);
5157 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5158 chunk_len += parameter_len;
5162 if (stcb->asoc.ecn_supported == 1) {
5163 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5164 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5165 ph->param_type = htons(SCTP_ECN_CAPABLE);
5166 ph->param_length = htons(parameter_len);
5167 chunk_len += parameter_len;
5170 /* PR-SCTP supported parameter */
5171 if (stcb->asoc.prsctp_supported == 1) {
5172 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5173 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5174 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5175 ph->param_length = htons(parameter_len);
5176 chunk_len += parameter_len;
5179 /* Add NAT friendly parameter. */
5180 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5181 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5182 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5183 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5184 ph->param_length = htons(parameter_len);
5185 chunk_len += parameter_len;
5188 /* And now tell the peer which extensions we support */
5190 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5191 if (stcb->asoc.prsctp_supported == 1) {
5192 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5193 if (stcb->asoc.idata_supported) {
5194 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5197 if (stcb->asoc.auth_supported == 1) {
5198 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5200 if (stcb->asoc.asconf_supported == 1) {
5201 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5202 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5204 if (stcb->asoc.reconfig_supported == 1) {
5205 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5207 if (stcb->asoc.idata_supported) {
5208 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5210 if (stcb->asoc.nrsack_supported == 1) {
5211 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5213 if (stcb->asoc.pktdrop_supported == 1) {
5214 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5217 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5218 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5219 pr_supported->ph.param_length = htons(parameter_len);
5220 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5221 chunk_len += parameter_len;
5223 /* add authentication parameters */
5224 if (stcb->asoc.auth_supported) {
5225 /* attach RANDOM parameter, if available */
5226 if (stcb->asoc.authinfo.random != NULL) {
5227 struct sctp_auth_random *randp;
5229 if (padding_len > 0) {
5230 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5231 chunk_len += padding_len;
5234 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5235 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5236 /* random key already contains the header */
5237 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5238 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5239 chunk_len += parameter_len;
5241 /* add HMAC_ALGO parameter */
5242 if (stcb->asoc.local_hmacs != NULL) {
5243 struct sctp_auth_hmac_algo *hmacs;
5245 if (padding_len > 0) {
5246 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5247 chunk_len += padding_len;
5250 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5251 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5252 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5253 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5254 hmacs->ph.param_length = htons(parameter_len);
5255 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5256 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5257 chunk_len += parameter_len;
5259 /* add CHUNKS parameter */
5260 if (stcb->asoc.local_auth_chunks != NULL) {
5261 struct sctp_auth_chunk_list *chunks;
5263 if (padding_len > 0) {
5264 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5265 chunk_len += padding_len;
5268 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5269 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5270 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5271 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5272 chunks->ph.param_length = htons(parameter_len);
5273 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5274 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5275 chunk_len += parameter_len;
5279 /* now any cookie time extensions */
5280 if (stcb->asoc.cookie_preserve_req) {
5281 struct sctp_cookie_perserve_param *cookie_preserve;
5283 if (padding_len > 0) {
5284 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5285 chunk_len += padding_len;
5288 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5289 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5290 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5291 cookie_preserve->ph.param_length = htons(parameter_len);
5292 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5293 stcb->asoc.cookie_preserve_req = 0;
5294 chunk_len += parameter_len;
5297 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5300 if (padding_len > 0) {
5301 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5302 chunk_len += padding_len;
5305 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5306 if (stcb->asoc.scope.ipv4_addr_legal) {
5307 parameter_len += (uint16_t)sizeof(uint16_t);
5309 if (stcb->asoc.scope.ipv6_addr_legal) {
5310 parameter_len += (uint16_t)sizeof(uint16_t);
5312 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5313 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5314 sup_addr->ph.param_length = htons(parameter_len);
5316 if (stcb->asoc.scope.ipv4_addr_legal) {
5317 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5319 if (stcb->asoc.scope.ipv6_addr_legal) {
5320 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5322 padding_len = 4 - 2 * i;
5323 chunk_len += parameter_len;
5326 SCTP_BUF_LEN(m) = chunk_len;
5327 /* now the addresses */
5328 /* To optimize this we could put the scoping stuff
5329 * into a structure and remove the individual uint8's from
5330 * the assoc structure. Then we could just sifa in the
5331 * address within the stcb. But for now this is a quick
5332 * hack to get the address stuff teased apart.
5334 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5336 &padding_len, &chunk_len);
5338 init->ch.chunk_length = htons(chunk_len);
5339 if (padding_len > 0) {
5340 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5345 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5346 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5347 (struct sockaddr *)&net->ro._l_addr,
5348 m, 0, NULL, 0, 0, 0, 0,
5349 inp->sctp_lport, stcb->rport, htonl(0),
5351 #if defined(__FreeBSD__) && !defined(__Userspace__)
5355 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
5356 if (error == ENOBUFS) {
5357 stcb->asoc.ifp_had_enobuf = 1;
5358 SCTP_STAT_INCR(sctps_lowlevelerr);
5361 stcb->asoc.ifp_had_enobuf = 0;
5363 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5364 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5368 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5369 int param_offset, int *abort_processing,
5370 struct sctp_chunkhdr *cp,
5375 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
5376 * being equal to the beginning of the params i.e. (iphlen +
5377 * sizeof(struct sctp_init_msg) parse through the parameters to the
5378 * end of the mbuf verifying that all parameters are known.
5380 * For unknown parameters build and return a mbuf with
5381 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
5382 * processing this chunk stop, and set *abort_processing to 1.
5384 * By having param_offset be pre-set to where parameters begin it is
5385 * hoped that this routine may be reused in the future by new
5388 struct sctp_paramhdr *phdr, params;
5390 struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
5391 int at, limit, pad_needed;
5392 uint16_t ptype, plen, padded_size;
5394 *abort_processing = 0;
5395 if (cookie_found != NULL) {
5399 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5404 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5405 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5406 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5407 ptype = ntohs(phdr->param_type);
5408 plen = ntohs(phdr->param_length);
5409 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5410 /* wacked parameter */
5411 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5414 limit -= SCTP_SIZE32(plen);
5416 * All parameters for all chunks that we know/understand are
5417 * listed here. We process them other places and make
5418 * appropriate stop actions per the upper bits. However this
5419 * is the generic routine processor's can call to get back
5420 * an operr.. to either incorporate (init-ack) or send.
5422 padded_size = SCTP_SIZE32(plen);
5424 /* Param's with variable size */
5425 case SCTP_HEARTBEAT_INFO:
5426 case SCTP_UNRECOG_PARAM:
5427 case SCTP_ERROR_CAUSE_IND:
5431 case SCTP_STATE_COOKIE:
5432 if (cookie_found != NULL) {
5437 /* Param's with variable size within a range */
5438 case SCTP_CHUNK_LIST:
5439 case SCTP_SUPPORTED_CHUNK_EXT:
5440 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5441 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5446 case SCTP_SUPPORTED_ADDRTYPE:
5447 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5448 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5454 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5455 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5460 case SCTP_SET_PRIM_ADDR:
5461 case SCTP_DEL_IP_ADDRESS:
5462 case SCTP_ADD_IP_ADDRESS:
5463 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5464 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5465 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5470 /* Param's with a fixed size */
5471 case SCTP_IPV4_ADDRESS:
5472 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5473 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5478 case SCTP_IPV6_ADDRESS:
5479 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5480 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5485 case SCTP_COOKIE_PRESERVE:
5486 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5487 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5492 case SCTP_HAS_NAT_SUPPORT:
5495 case SCTP_PRSCTP_SUPPORTED:
5496 if (padded_size != sizeof(struct sctp_paramhdr)) {
5497 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5502 case SCTP_ECN_CAPABLE:
5503 if (padded_size != sizeof(struct sctp_paramhdr)) {
5504 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5509 case SCTP_ULP_ADAPTATION:
5510 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5511 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
5516 case SCTP_SUCCESS_REPORT:
5517 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5518 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5523 case SCTP_HOSTNAME_ADDRESS:
5525 /* Hostname parameters are deprecated. */
5526 struct sctp_gen_error_cause *cause;
5529 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5530 *abort_processing = 1;
5531 sctp_m_freem(op_err);
5535 l_len = SCTP_MIN_OVERHEAD;
5537 l_len = SCTP_MIN_V4_OVERHEAD;
5539 l_len += sizeof(struct sctp_chunkhdr);
5540 l_len += sizeof(struct sctp_gen_error_cause);
5541 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5542 if (op_err != NULL) {
5544 * Pre-reserve space for IP, SCTP, and
5548 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5550 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5552 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5553 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5554 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5555 cause = mtod(op_err, struct sctp_gen_error_cause *);
5556 cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5557 cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5558 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5559 if (SCTP_BUF_NEXT(op_err) == NULL) {
5560 sctp_m_freem(op_err);
5569 * we do not recognize the parameter figure out what
5572 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5573 if ((ptype & 0x4000) == 0x4000) {
5574 /* Report bit is set?? */
5575 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5576 if (op_err == NULL) {
5578 /* Ok need to try to get an mbuf */
5580 l_len = SCTP_MIN_OVERHEAD;
5582 l_len = SCTP_MIN_V4_OVERHEAD;
5584 l_len += sizeof(struct sctp_chunkhdr);
5585 l_len += sizeof(struct sctp_paramhdr);
5586 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5588 SCTP_BUF_LEN(op_err) = 0;
5590 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5592 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5594 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5595 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5596 op_err_last = op_err;
5599 if (op_err != NULL) {
5600 /* If we have space */
5601 struct sctp_paramhdr *param;
5603 if (pad_needed > 0) {
5604 op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5606 if (op_err_last == NULL) {
5607 sctp_m_freem(op_err);
5610 goto more_processing;
5612 if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5613 m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5614 if (m_tmp == NULL) {
5615 sctp_m_freem(op_err);
5618 goto more_processing;
5620 SCTP_BUF_LEN(m_tmp) = 0;
5621 SCTP_BUF_NEXT(m_tmp) = NULL;
5622 SCTP_BUF_NEXT(op_err_last) = m_tmp;
5623 op_err_last = m_tmp;
5625 param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last));
5626 param->param_type = htons(SCTP_UNRECOG_PARAM);
5627 param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5628 SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5629 SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5630 if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5631 sctp_m_freem(op_err);
5634 goto more_processing;
5636 while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5637 op_err_last = SCTP_BUF_NEXT(op_err_last);
5640 if (plen % 4 != 0) {
5641 pad_needed = 4 - (plen % 4);
5648 if ((ptype & 0x8000) == 0x0000) {
5649 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5652 /* skip this chunk and continue processing */
5653 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5654 at += SCTP_SIZE32(plen);
5659 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5663 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5664 *abort_processing = 1;
5665 sctp_m_freem(op_err);
5669 struct sctp_paramhdr *param;
5672 l_len = SCTP_MIN_OVERHEAD;
5674 l_len = SCTP_MIN_V4_OVERHEAD;
5676 l_len += sizeof(struct sctp_chunkhdr);
5677 l_len += (2 * sizeof(struct sctp_paramhdr));
5678 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5680 SCTP_BUF_LEN(op_err) = 0;
5682 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5684 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5686 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5687 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5688 SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5689 param = mtod(op_err, struct sctp_paramhdr *);
5690 param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5691 param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5693 param->param_type = htons(ptype);
5694 param->param_length = htons(plen);
5701 sctp_are_there_new_addresses(struct sctp_association *asoc,
5702 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5705 * Given a INIT packet, look through the packet to verify that there
5706 * are NO new addresses. As we go through the parameters add reports
5707 * of any un-understood parameters that require an error. Also we
5708 * must return (1) to drop the packet if we see a un-understood
5709 * parameter that tells us to drop the chunk.
5711 struct sockaddr *sa_touse;
5712 struct sockaddr *sa;
5713 struct sctp_paramhdr *phdr, params;
5714 uint16_t ptype, plen;
5716 struct sctp_nets *net;
5719 struct sockaddr_in sin4, *sa4;
5722 struct sockaddr_in6 sin6, *sa6;
5724 #if defined(__Userspace__)
5725 struct sockaddr_conn *sac;
5729 memset(&sin4, 0, sizeof(sin4));
5730 sin4.sin_family = AF_INET;
5732 sin4.sin_len = sizeof(sin4);
5736 memset(&sin6, 0, sizeof(sin6));
5737 sin6.sin6_family = AF_INET6;
5738 #ifdef HAVE_SIN6_LEN
5739 sin6.sin6_len = sizeof(sin6);
5742 /* First what about the src address of the pkt ? */
5744 switch (src->sa_family) {
5747 if (asoc->scope.ipv4_addr_legal) {
5754 if (asoc->scope.ipv6_addr_legal) {
5759 #if defined(__Userspace__)
5761 if (asoc->scope.conn_addr_legal) {
5772 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5773 sa = (struct sockaddr *)&net->ro._l_addr;
5774 if (sa->sa_family == src->sa_family) {
5776 if (sa->sa_family == AF_INET) {
5777 struct sockaddr_in *src4;
5779 sa4 = (struct sockaddr_in *)sa;
5780 src4 = (struct sockaddr_in *)src;
5781 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5788 if (sa->sa_family == AF_INET6) {
5789 struct sockaddr_in6 *src6;
5791 sa6 = (struct sockaddr_in6 *)sa;
5792 src6 = (struct sockaddr_in6 *)src;
5793 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5799 #if defined(__Userspace__)
5800 if (sa->sa_family == AF_CONN) {
5801 struct sockaddr_conn *srcc;
5803 sac = (struct sockaddr_conn *)sa;
5804 srcc = (struct sockaddr_conn *)src;
5805 if (sac->sconn_addr == srcc->sconn_addr) {
5814 /* New address added! no need to look further. */
5818 /* Ok so far lets munge through the rest of the packet */
5819 offset += sizeof(struct sctp_init_chunk);
5820 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5823 ptype = ntohs(phdr->param_type);
5824 plen = ntohs(phdr->param_length);
5827 case SCTP_IPV4_ADDRESS:
5829 struct sctp_ipv4addr_param *p4, p4_buf;
5831 if (plen != sizeof(struct sctp_ipv4addr_param)) {
5834 phdr = sctp_get_next_param(in_initpkt, offset,
5835 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5839 if (asoc->scope.ipv4_addr_legal) {
5840 p4 = (struct sctp_ipv4addr_param *)phdr;
5841 sin4.sin_addr.s_addr = p4->addr;
5842 sa_touse = (struct sockaddr *)&sin4;
5848 case SCTP_IPV6_ADDRESS:
5850 struct sctp_ipv6addr_param *p6, p6_buf;
5852 if (plen != sizeof(struct sctp_ipv6addr_param)) {
5855 phdr = sctp_get_next_param(in_initpkt, offset,
5856 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5860 if (asoc->scope.ipv6_addr_legal) {
5861 p6 = (struct sctp_ipv6addr_param *)phdr;
5862 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5864 sa_touse = (struct sockaddr *)&sin6;
5874 /* ok, sa_touse points to one to check */
5876 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5877 sa = (struct sockaddr *)&net->ro._l_addr;
5878 if (sa->sa_family != sa_touse->sa_family) {
5882 if (sa->sa_family == AF_INET) {
5883 sa4 = (struct sockaddr_in *)sa;
5884 if (sa4->sin_addr.s_addr ==
5885 sin4.sin_addr.s_addr) {
5892 if (sa->sa_family == AF_INET6) {
5893 sa6 = (struct sockaddr_in6 *)sa;
5894 if (SCTP6_ARE_ADDR_EQUAL(
5903 /* New addr added! no need to look further */
5907 offset += SCTP_SIZE32(plen);
5908 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
 * Given a MBUF chain that was sent into us containing an INIT. Build an
 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
 * a pullup to include IPv6/4 header, SCTP header and initial part of INIT
 * message (i.e. the struct sctp_init_msg).
5920 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5921 struct sctp_nets *src_net, struct mbuf *init_pkt,
5922 int iphlen, int offset,
5923 struct sockaddr *src, struct sockaddr *dst,
5924 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5925 #if defined(__FreeBSD__) && !defined(__Userspace__)
5926 uint8_t mflowtype, uint32_t mflowid,
5928 uint32_t vrf_id, uint16_t port)
5930 struct sctp_association *asoc;
5931 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5932 struct sctp_init_ack_chunk *initack;
5933 struct sctp_adaptation_layer_indication *ali;
5934 struct sctp_supported_chunk_types_param *pr_supported;
5935 struct sctp_paramhdr *ph;
5936 union sctp_sockstore *over_addr;
5937 struct sctp_scoping scp;
5940 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5941 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5942 struct sockaddr_in *sin;
5945 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5946 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5947 struct sockaddr_in6 *sin6;
5949 #if defined(__Userspace__)
5950 struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5951 struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5952 struct sockaddr_conn *sconn;
5954 struct sockaddr *to;
5955 struct sctp_state_cookie stc;
5956 struct sctp_nets *net = NULL;
5957 uint8_t *signature = NULL;
5958 int cnt_inits_to = 0;
5959 uint16_t his_limit, i_want;
5961 int nat_friendly = 0;
5964 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5971 if ((asoc != NULL) &&
5972 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5973 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5975 * new addresses, out of here in non-cookie-wait states
5977 * Send an ABORT, without the new address error cause.
5978 * This looks no different than if no listener
5981 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5983 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5984 #if defined(__FreeBSD__) && !defined(__Userspace__)
5985 mflowtype, mflowid, inp->fibnum,
5990 if (src_net != NULL && (src_net->port != port)) {
5992 * change of remote encapsulation port, out of here in
5993 * non-cookie-wait states
5995 * Send an ABORT, without an specific error cause.
5996 * This looks no different than if no listener
5999 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6000 "Remote encapsulation port changed");
6001 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
6002 #if defined(__FreeBSD__) && !defined(__Userspace__)
6003 mflowtype, mflowid, inp->fibnum,
6010 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
6011 (offset + sizeof(struct sctp_init_chunk)),
6013 (struct sctp_chunkhdr *)init_chk,
6014 &nat_friendly, NULL);
6017 if (op_err == NULL) {
6018 char msg[SCTP_DIAG_INFO_LEN];
6020 SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
6021 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6024 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
6025 init_chk->init.initiate_tag, op_err,
6026 #if defined(__FreeBSD__) && !defined(__Userspace__)
6027 mflowtype, mflowid, inp->fibnum,
6032 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
6034 /* No memory, INIT timer will re-attempt. */
6035 sctp_m_freem(op_err);
6038 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
6042 * We might not overwrite the identification[] completely and on
6043 * some platforms time_entered will contain some padding.
6044 * Therefore zero out the cookie to avoid putting
6045 * uninitialized memory on the wire.
6047 memset(&stc, 0, sizeof(struct sctp_state_cookie));
6049 /* the time I built cookie */
6050 (void)SCTP_GETTIME_TIMEVAL(&now);
6051 stc.time_entered.tv_sec = now.tv_sec;
6052 stc.time_entered.tv_usec = now.tv_usec;
6054 /* populate any tie tags */
6056 /* unlock before tag selections */
6057 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
6058 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
6059 stc.cookie_life = asoc->cookie_life;
6060 net = asoc->primary_destination;
6062 stc.tie_tag_my_vtag = 0;
6063 stc.tie_tag_peer_vtag = 0;
6064 /* life I will award this cookie */
6065 stc.cookie_life = inp->sctp_ep.def_cookie_life;
6068 /* copy in the ports for later check */
6069 stc.myport = sh->dest_port;
6070 stc.peerport = sh->src_port;
6073 * If we wanted to honor cookie life extensions, we would add to
6074 * stc.cookie_life. For now we should NOT honor any extension
6076 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
6077 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6078 stc.ipv6_addr_legal = 1;
6079 if (SCTP_IPV6_V6ONLY(inp)) {
6080 stc.ipv4_addr_legal = 0;
6082 stc.ipv4_addr_legal = 1;
6084 #if defined(__Userspace__)
6085 stc.conn_addr_legal = 0;
6088 stc.ipv6_addr_legal = 0;
6089 #if defined(__Userspace__)
6090 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6091 stc.conn_addr_legal = 1;
6092 stc.ipv4_addr_legal = 0;
6094 stc.conn_addr_legal = 0;
6095 stc.ipv4_addr_legal = 1;
6098 stc.ipv4_addr_legal = 1;
6104 switch (dst->sa_family) {
6108 /* lookup address */
6109 stc.address[0] = src4->sin_addr.s_addr;
6113 stc.addr_type = SCTP_IPV4_ADDRESS;
6114 /* local from address */
6115 stc.laddress[0] = dst4->sin_addr.s_addr;
6116 stc.laddress[1] = 0;
6117 stc.laddress[2] = 0;
6118 stc.laddress[3] = 0;
6119 stc.laddr_type = SCTP_IPV4_ADDRESS;
6120 /* scope_id is only for v6 */
6122 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
6123 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
6126 /* Must use the address in this case */
6127 if (sctp_is_address_on_local_host(src, vrf_id)) {
6128 stc.loopback_scope = 1;
6131 stc.local_scope = 0;
6139 stc.addr_type = SCTP_IPV6_ADDRESS;
6140 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6141 #if defined(__FreeBSD__) && !defined(__Userspace__)
6142 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
6146 if (sctp_is_address_on_local_host(src, vrf_id)) {
6147 stc.loopback_scope = 1;
6148 stc.local_scope = 0;
6151 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
6152 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
6154 * If the new destination or source is a
6155 * LINK_LOCAL we must have common both site and
6156 * local scope. Don't set local scope though
6157 * since we must depend on the source to be
6158 * added implicitly. We cannot assure just
6159 * because we share one link that all links are
6162 #if defined(__APPLE__) && !defined(__Userspace__)
6163 /* Mac OS X currently doesn't have in6_getscope() */
6164 stc.scope_id = src6->sin6_addr.s6_addr16[1];
6166 stc.local_scope = 0;
6170 * we start counting for the private address
6171 * stuff at 1. since the link local we
6172 * source from won't show up in our scoped
6176 /* pull out the scope_id from incoming pkt */
6177 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
6178 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
6180 * If the new destination or source is
6181 * SITE_LOCAL then we must have site scope in
6186 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6187 stc.laddr_type = SCTP_IPV6_ADDRESS;
6191 #if defined(__Userspace__)
6194 /* lookup address */
6199 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6200 stc.addr_type = SCTP_CONN_ADDRESS;
6201 /* local from address */
6202 stc.laddress[0] = 0;
6203 stc.laddress[1] = 0;
6204 stc.laddress[2] = 0;
6205 stc.laddress[3] = 0;
6206 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6207 stc.laddr_type = SCTP_CONN_ADDRESS;
6208 /* scope_id is only for v6 */
6219 /* set the scope per the existing tcb */
6222 struct sctp_nets *lnet;
6225 stc.loopback_scope = asoc->scope.loopback_scope;
6226 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6227 stc.site_scope = asoc->scope.site_scope;
6228 stc.local_scope = asoc->scope.local_scope;
6230 /* Why do we not consider IPv4 LL addresses? */
6231 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6232 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6233 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6235 * if we have a LL address, start
6243 /* use the net pointer */
6244 to = (struct sockaddr *)&net->ro._l_addr;
6245 switch (to->sa_family) {
6248 sin = (struct sockaddr_in *)to;
6249 stc.address[0] = sin->sin_addr.s_addr;
6253 stc.addr_type = SCTP_IPV4_ADDRESS;
6254 if (net->src_addr_selected == 0) {
6256 * strange case here, the INIT should have
6257 * did the selection.
6259 net->ro._s_addr = sctp_source_address_selection(inp,
6260 stcb, (sctp_route_t *)&net->ro,
6262 if (net->ro._s_addr == NULL) {
6263 sctp_m_freem(op_err);
6268 net->src_addr_selected = 1;
6271 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6272 stc.laddress[1] = 0;
6273 stc.laddress[2] = 0;
6274 stc.laddress[3] = 0;
6275 stc.laddr_type = SCTP_IPV4_ADDRESS;
6276 /* scope_id is only for v6 */
6282 sin6 = (struct sockaddr_in6 *)to;
6283 memcpy(&stc.address, &sin6->sin6_addr,
6284 sizeof(struct in6_addr));
6285 stc.addr_type = SCTP_IPV6_ADDRESS;
6286 stc.scope_id = sin6->sin6_scope_id;
6287 if (net->src_addr_selected == 0) {
6289 * strange case here, the INIT should have
6290 * done the selection.
6292 net->ro._s_addr = sctp_source_address_selection(inp,
6293 stcb, (sctp_route_t *)&net->ro,
6295 if (net->ro._s_addr == NULL) {
6296 sctp_m_freem(op_err);
6301 net->src_addr_selected = 1;
6303 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6304 sizeof(struct in6_addr));
6305 stc.laddr_type = SCTP_IPV6_ADDRESS;
6308 #if defined(__Userspace__)
6310 sconn = (struct sockaddr_conn *)to;
6315 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6316 stc.addr_type = SCTP_CONN_ADDRESS;
6317 stc.laddress[0] = 0;
6318 stc.laddress[1] = 0;
6319 stc.laddress[2] = 0;
6320 stc.laddress[3] = 0;
6321 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6322 stc.laddr_type = SCTP_CONN_ADDRESS;
6328 /* Now lets put the SCTP header in place */
6329 initack = mtod(m, struct sctp_init_ack_chunk *);
6330 /* Save it off for quick ref */
6331 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
6333 memcpy(stc.identification, SCTP_VERSION_STRING,
6334 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6335 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6336 /* now the chunk header */
6337 initack->ch.chunk_type = SCTP_INITIATION_ACK;
6338 initack->ch.chunk_flags = 0;
6339 /* fill in later from mbuf we build */
6340 initack->ch.chunk_length = 0;
6341 /* place in my tag */
6342 if ((asoc != NULL) &&
6343 ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
6344 (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
6345 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
6346 /* re-use the v-tags and init-seq here */
6347 initack->init.initiate_tag = htonl(asoc->my_vtag);
6348 initack->init.initial_tsn = htonl(asoc->init_seq_number);
6350 uint32_t vtag, itsn;
6353 atomic_add_int(&asoc->refcnt, 1);
6354 SCTP_TCB_UNLOCK(stcb);
6356 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6357 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
6358 /* Got a duplicate vtag on some guy behind a nat
6359 * make sure we don't use it.
6363 initack->init.initiate_tag = htonl(vtag);
6364 /* get a TSN to use too */
6365 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6366 initack->init.initial_tsn = htonl(itsn);
6367 SCTP_TCB_LOCK(stcb);
6368 atomic_add_int(&asoc->refcnt, -1);
6370 SCTP_INP_INCR_REF(inp);
6371 SCTP_INP_RUNLOCK(inp);
6372 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6373 initack->init.initiate_tag = htonl(vtag);
6374 /* get a TSN to use too */
6375 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6376 SCTP_INP_RLOCK(inp);
6377 SCTP_INP_DECR_REF(inp);
6380 /* save away my tag to */
6381 stc.my_vtag = initack->init.initiate_tag;
6383 /* set up some of the credits. */
6384 so = inp->sctp_socket;
6386 /* memory problem */
6387 sctp_m_freem(op_err);
6391 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6393 /* set what I want */
6394 his_limit = ntohs(init_chk->init.num_inbound_streams);
6395 /* choose what I want */
6397 if (asoc->streamoutcnt > asoc->pre_open_streams) {
6398 i_want = asoc->streamoutcnt;
6400 i_want = asoc->pre_open_streams;
6403 i_want = inp->sctp_ep.pre_open_stream_count;
6405 if (his_limit < i_want) {
6406 /* I Want more :< */
6407 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6409 /* I can have what I want :> */
6410 initack->init.num_outbound_streams = htons(i_want);
6412 /* tell him his limit. */
6413 initack->init.num_inbound_streams =
6414 htons(inp->sctp_ep.max_open_streams_intome);
6416 /* adaptation layer indication parameter */
6417 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6418 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6419 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6420 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6421 ali->ph.param_length = htons(parameter_len);
6422 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6423 chunk_len += parameter_len;
6427 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6428 ((asoc == NULL) && (inp->ecn_supported == 1))) {
6429 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6430 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6431 ph->param_type = htons(SCTP_ECN_CAPABLE);
6432 ph->param_length = htons(parameter_len);
6433 chunk_len += parameter_len;
6436 /* PR-SCTP supported parameter */
6437 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6438 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6439 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6440 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6441 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6442 ph->param_length = htons(parameter_len);
6443 chunk_len += parameter_len;
6446 /* Add NAT friendly parameter */
6448 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6449 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6450 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6451 ph->param_length = htons(parameter_len);
6452 chunk_len += parameter_len;
6455 /* And now tell the peer which extensions we support */
6457 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6458 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6459 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6460 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6461 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6462 ((asoc == NULL) && (inp->idata_supported == 1))) {
6463 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
6466 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6467 ((asoc == NULL) && (inp->auth_supported == 1))) {
6468 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6470 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6471 ((asoc == NULL) && (inp->asconf_supported == 1))) {
6472 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6473 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6475 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6476 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6477 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6479 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6480 ((asoc == NULL) && (inp->idata_supported == 1))) {
6481 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
6483 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6484 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6485 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6487 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6488 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6489 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6492 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6493 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6494 pr_supported->ph.param_length = htons(parameter_len);
6495 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6496 chunk_len += parameter_len;
6499 /* add authentication parameters */
6500 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6501 ((asoc == NULL) && (inp->auth_supported == 1))) {
6502 struct sctp_auth_random *randp;
6503 struct sctp_auth_hmac_algo *hmacs;
6504 struct sctp_auth_chunk_list *chunks;
6506 if (padding_len > 0) {
6507 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6508 chunk_len += padding_len;
6511 /* generate and add RANDOM parameter */
6512 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6513 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6514 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6515 randp->ph.param_type = htons(SCTP_RANDOM);
6516 randp->ph.param_length = htons(parameter_len);
6517 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6518 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6519 chunk_len += parameter_len;
6521 if (padding_len > 0) {
6522 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6523 chunk_len += padding_len;
6526 /* add HMAC_ALGO parameter */
6527 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6528 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6529 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6530 (uint8_t *)hmacs->hmac_ids);
6531 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6532 hmacs->ph.param_length = htons(parameter_len);
6533 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6534 chunk_len += parameter_len;
6536 if (padding_len > 0) {
6537 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6538 chunk_len += padding_len;
6541 /* add CHUNKS parameter */
6542 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6543 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6544 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6545 chunks->chunk_types);
6546 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6547 chunks->ph.param_length = htons(parameter_len);
6548 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6549 chunk_len += parameter_len;
6551 SCTP_BUF_LEN(m) = chunk_len;
6553 /* now the addresses */
6554 /* To optimize this we could put the scoping stuff
6555 * into a structure and remove the individual uint8's from
6556 * the stc structure. Then we could just sifa in the
6557 * address within the stc.. but for now this is a quick
6558 * hack to get the address stuff teased apart.
6560 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6561 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6562 #if defined(__Userspace__)
6563 scp.conn_addr_legal = stc.conn_addr_legal;
6565 scp.loopback_scope = stc.loopback_scope;
6566 scp.ipv4_local_scope = stc.ipv4_scope;
6567 scp.local_scope = stc.local_scope;
6568 scp.site_scope = stc.site_scope;
6569 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6571 &padding_len, &chunk_len);
6572 /* padding_len can only be positive, if no addresses have been added */
6573 if (padding_len > 0) {
6574 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6575 chunk_len += padding_len;
6576 SCTP_BUF_LEN(m) += padding_len;
6580 /* tack on the operational error if present */
6583 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6584 parameter_len += SCTP_BUF_LEN(m_tmp);
6586 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6587 SCTP_BUF_NEXT(m_last) = op_err;
6588 while (SCTP_BUF_NEXT(m_last) != NULL) {
6589 m_last = SCTP_BUF_NEXT(m_last);
6591 chunk_len += parameter_len;
6593 if (padding_len > 0) {
6594 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6595 if (m_last == NULL) {
6596 /* Houston we have a problem, no space */
6600 chunk_len += padding_len;
6603 /* Now we must build a cookie */
6604 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6605 if (m_cookie == NULL) {
6606 /* memory problem */
6610 /* Now append the cookie to the end and update the space/size */
6611 SCTP_BUF_NEXT(m_last) = m_cookie;
6613 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6614 parameter_len += SCTP_BUF_LEN(m_tmp);
6615 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6619 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6620 chunk_len += parameter_len;
6622 /* Place in the size, but we don't include
6623 * the last pad (if any) in the INIT-ACK.
6625 initack->ch.chunk_length = htons(chunk_len);
6627 /* Time to sign the cookie, we don't sign over the cookie
6628 * signature though thus we set trailer.
6630 (void)sctp_hmac_m(SCTP_HMAC,
6631 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6632 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6633 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6634 #if defined(__Userspace__)
6636 * Don't put AF_CONN addresses on the wire, in case this is critical
6637 * for the application. However, they are protected by the HMAC and
6638 * need to be reconstructed before checking the HMAC.
6639 * Clearing is only done in the mbuf chain, since the local stc is
6642 if (stc.addr_type == SCTP_CONN_ADDRESS) {
6643 const void *p = NULL;
6645 m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address),
6646 (int)sizeof(void *), (caddr_t)&p);
6648 if (stc.laddr_type == SCTP_CONN_ADDRESS) {
6649 const void *p = NULL;
6651 m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress),
6652 (int)sizeof(void *), (caddr_t)&p);
6656 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return
6657 * here since the timer will drive a retranmission.
6659 if (padding_len > 0) {
6660 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6665 if (stc.loopback_scope) {
6666 over_addr = (union sctp_sockstore *)dst;
6671 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6673 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6675 #if defined(__FreeBSD__) && !defined(__Userspace__)
6678 SCTP_SO_NOT_LOCKED))) {
6679 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6680 if (error == ENOBUFS) {
6682 asoc->ifp_had_enobuf = 1;
6684 SCTP_STAT_INCR(sctps_lowlevelerr);
6688 asoc->ifp_had_enobuf = 0;
6691 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
/*
 * Free buffer space for a pending send on a PR-SCTP association by
 * dropping already-queued chunks that use the "buffer space" PR-SCTP
 * policy and have lower priority (larger timetodrop.tv_sec) than the
 * message being sent (srcv->sinfo_timetolive).  First scans the sent
 * queue, then the send queue, releasing chunks via
 * sctp_release_pr_sctp_chunk() until enough space has been freed.
 * NOTE(review): the stop threshold `dataout` and accumulator `freed_spc`
 * are declared on original lines not visible in this view — presumably
 * the byte count of the new message; confirm against the full file.
 * Caller must hold the TCB lock (asserted below).
 */
6696 sctp_prune_prsctp(struct sctp_tcb *stcb,
6697 struct sctp_association *asoc,
6698 struct sctp_sndrcvinfo *srcv,
6702 struct sctp_tmit_chunk *chk, *nchk;
6704 SCTP_TCB_LOCK_ASSERT(stcb);
6705 if ((asoc->prsctp_supported) &&
6706 (asoc->sent_queue_cnt_removeable > 0)) {
6707 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6709 * Look for chunks marked with the PR_SCTP flag AND
6710 * the buffer space flag. If the one being sent is
6711 * equal or greater priority then purge the old one
6712 * and free some space.
6714 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6716 * This one is PR-SCTP AND buffer space
6719 if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6721 * Lower numbers equates to higher
6722 * priority. So if the one we are
6723 * looking at has a larger priority,
6724 * we want to drop the data and NOT
6729 * We release the book_size
6730 * if the mbuf is here
/* Chunks past SCTP_DATAGRAM_UNSENT have been transmitted at least once. */
6735 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6739 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6742 freed_spc += ret_spc;
6743 if (freed_spc >= dataout) {
6746 } /* if chunk was present */
6747 } /* if of sufficient priority */
6748 } /* if chunk has enabled */
6749 } /* tailqforeach */
/* Second pass: not-yet-sent chunks on the send queue; SAFE variant since entries are removed while iterating. */
6751 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6752 /* Here we must move to the sent queue and mark */
6753 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6754 if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6757 * We release the book_size
6758 * if the mbuf is here
6762 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6765 freed_spc += ret_spc;
6766 if (freed_spc >= dataout) {
6769 } /* end if chk->data */
6770 } /* end if right class */
6771 } /* end if chk pr-sctp */
6772 } /* tailqforeachsafe (chk) */
6773 } /* if enabled in asoc */
/*
 * Compute the fragmentation point for DATA chunks on this association:
 * the smaller of the configured sctp_frag_point and the smallest path
 * MTU, minus per-packet overhead (IP/SCTP headers, per-chunk DATA
 * overhead, and an AUTH chunk when DATA must be authenticated).
 * NOTE(review): the final rounding and return statement fall on original
 * lines not visible in this view (see "even word boundary" comment).
 */
6777 sctp_get_frag_point(struct sctp_tcb *stcb,
6778 struct sctp_association *asoc)
6783 * For endpoints that have both v6 and v4 addresses we must reserve
6784 * room for the ipv6 header, for those that are only dealing with V4
6785 * we use a larger frag point.
6787 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6788 ovh = SCTP_MIN_OVERHEAD;
6790 #if defined(__Userspace__)
/* AF_CONN sockets carry only the SCTP common header — no IP header overhead. */
6791 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6792 ovh = sizeof(struct sctphdr);
6794 ovh = SCTP_MIN_V4_OVERHEAD;
6797 ovh = SCTP_MIN_V4_OVERHEAD;
6800 ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
/* Clamp to the smallest discovered path MTU if the configured frag point exceeds it. */
6801 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6802 siz = asoc->smallest_mtu - ovh;
6804 siz = (stcb->asoc.sctp_frag_point - ovh);
6806 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6808 /* A data chunk MUST fit in a cluster */
6809 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6812 /* adjust for an AUTH chunk if DATA requires auth */
6813 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6814 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6817 /* make it an even word boundary please */
/*
 * Record the PR-SCTP policy for a stream-queue-pending message in
 * sp->act_flags and initialize sp->ts according to that policy:
 *  - BUF: tv_sec holds a drop priority (see sctp_prune_prsctp()).
 *  - TTL: ts becomes "now + timetolive" (timetolive is in milliseconds,
 *    converted to sec/usec below).
 *  - RTX: tv_sec holds a retransmission count limit.
 * If no policy flag is set but timetolive > 0, TTL is assumed.
 */
6824 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6827 * We assume that the user wants PR_SCTP_TTL if the user
6828 * provides a positive lifetime but does not specify any
6831 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6832 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6833 } else if (sp->timetolive > 0) {
6834 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6835 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6839 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6840 case CHUNK_FLAGS_PR_SCTP_BUF:
6842 * Time to live is a priority stored in tv_sec when
6843 * doing the buffer drop thing.
6845 sp->ts.tv_sec = sp->timetolive;
6848 case CHUNK_FLAGS_PR_SCTP_TTL:
/* Compute absolute expiry time: now + timetolive (ms). */
6851 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6852 tv.tv_sec = sp->timetolive / 1000;
6853 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6854 /* TODO sctp_constants.h needs alternative time macros when
6855 * _KERNEL is undefined.
6857 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
6858 timeradd(&sp->ts, &tv, &sp->ts);
6860 timevaladd(&sp->ts, &tv);
6864 case CHUNK_FLAGS_PR_SCTP_RTX:
6866 * Time to live is a the number or retransmissions
6869 sp->ts.tv_sec = sp->timetolive;
6873 SCTPDBG(SCTP_DEBUG_USRREQ1,
6874 "Unknown PR_SCTP policy %u.\n",
6875 PR_SCTP_POLICY(sp->sinfo_flags));
/*
 * Append a complete message (mbuf chain m) to the association's stream
 * output queue for stream srcv->sinfo_stream and hand it to the stream
 * scheduler.  The message is queued as fully complete (msg_is_complete
 * and sender_all_done set).  Validates the stream number, rejects sends
 * on a stream locked to another message and sends while shutting down.
 * hold_stcb_lock != 0 means the caller already holds the send lock, so
 * it is not taken here.  Returns an errno-style value (EINVAL,
 * ECONNRESET, ENOMEM paths visible below).
 */
6881 sctp_msg_append(struct sctp_tcb *stcb,
6882 struct sctp_nets *net,
6884 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6888 struct sctp_stream_queue_pending *sp = NULL;
6889 struct sctp_stream_out *strm;
6891 /* Given an mbuf chain, put it
6892 * into the association send queue and
6893 * place it on the wheel
6895 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6896 /* Invalid stream number */
6897 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
/* A different message is mid-delivery on the explicit-EOR-locked stream. */
6901 if ((stcb->asoc.stream_locked) &&
6902 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6903 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6907 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6908 /* Now can we send this? */
6909 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6910 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6911 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6912 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6913 /* got data while shutting down */
6914 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6918 sctp_alloc_a_strmoq(stcb, sp);
6920 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
/* Copy the caller's send parameters into the pending entry. */
6924 sp->sinfo_flags = srcv->sinfo_flags;
6925 sp->timetolive = srcv->sinfo_timetolive;
6926 sp->ppid = srcv->sinfo_ppid;
6927 sp->context = srcv->sinfo_context;
6929 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6931 atomic_add_int(&sp->net->ref_count, 1);
6935 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6936 sp->sid = srcv->sinfo_stream;
6937 sp->msg_is_complete = 1;
6938 sp->sender_all_done = 1;
6941 sp->tail_mbuf = NULL;
6942 sctp_set_prsctp_policy(sp);
6943 /* We could in theory (for sendall) sifa the length
6944 * in, but we would still have to hunt through the
6945 * chain since we need to setup the tail_mbuf
/* Walk the chain once to total the length and find the tail mbuf. */
6948 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6949 if (SCTP_BUF_NEXT(at) == NULL)
6951 sp->length += SCTP_BUF_LEN(at);
6953 if (srcv->sinfo_keynumber_valid) {
6954 sp->auth_keyid = srcv->sinfo_keynumber;
6956 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6958 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6959 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6960 sp->holds_key_ref = 1;
6962 if (hold_stcb_lock == 0) {
6963 SCTP_TCB_SEND_LOCK(stcb);
/* Account socket-buffer space, enqueue, and notify the stream scheduler. */
6965 sctp_snd_sb_alloc(stcb, sp->length);
6966 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6967 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6968 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6970 if (hold_stcb_lock == 0) {
6971 SCTP_TCB_SEND_UNLOCK(stcb);
/*
 * Append the contents of clonechain to outchain and return the head of
 * the combined chain (or NULL on allocation failure, freeing outchain).
 * Three strategies, chosen by the arguments:
 *  - can_take_mbuf: take ownership of clonechain directly, no copy;
 *  - small payloads (under the sctp_mbuf_threshold_count heuristic):
 *    m_copydata() the bytes into the trailing space of (possibly newly
 *    allocated) cluster mbufs at *endofchain;
 *  - otherwise: duplicate via SCTP_M_COPYM and link the copy on.
 * *endofchain is maintained as a cursor to the last mbuf so repeated
 * appends avoid rewalking the chain.
 * NOTE(review): `sizeofcpy` and several declarations sit on original
 * lines not visible here; the copy-path threshold comparison relies on
 * them — confirm against the full file.
 */
6981 static struct mbuf *
6982 sctp_copy_mbufchain(struct mbuf *clonechain,
6983 struct mbuf *outchain,
6984 struct mbuf **endofchain,
6987 uint8_t copy_by_ref)
6990 struct mbuf *appendchain;
6994 if (endofchain == NULL) {
6998 sctp_m_freem(outchain);
7001 if (can_take_mbuf) {
7002 appendchain = clonechain;
7005 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
7006 /* Its not in a cluster */
7007 if (*endofchain == NULL) {
7008 /* lets get a mbuf cluster */
7009 if (outchain == NULL) {
7010 /* This is the general case */
7012 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7013 if (outchain == NULL) {
7016 SCTP_BUF_LEN(outchain) = 0;
7017 *endofchain = outchain;
7018 /* get the prepend space */
7019 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
7021 /* We really should not get a NULL in endofchain */
/* Walk to the tail of the existing outchain to find the real end. */
7025 if (SCTP_BUF_NEXT(m) == NULL) {
7029 m = SCTP_BUF_NEXT(m);
7032 if (*endofchain == NULL) {
7033 /* huh, TSNH XXX maybe we should panic */
7034 sctp_m_freem(outchain);
7038 /* get the new end of length */
7039 len = (int)M_TRAILINGSPACE(*endofchain);
7041 /* how much is left at the end? */
7042 len = (int)M_TRAILINGSPACE(*endofchain);
7044 /* Find the end of the data, for appending */
7045 cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
7047 /* Now lets copy it out */
7048 if (len >= sizeofcpy) {
7049 /* It all fits, copy it in */
7050 m_copydata(clonechain, 0, sizeofcpy, cp);
7051 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7053 /* fill up the end of the chain */
7055 m_copydata(clonechain, 0, len, cp);
7056 SCTP_BUF_LEN((*endofchain)) += len;
7057 /* now we need another one */
7060 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7065 SCTP_BUF_NEXT((*endofchain)) = m;
7067 cp = mtod((*endofchain), caddr_t);
/* Copy the remaining bytes (sizeofcpy presumably reduced by len above — original line not in view). */
7068 m_copydata(clonechain, len, sizeofcpy, cp);
7069 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7073 /* copy the old fashion way */
7074 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
7075 #ifdef SCTP_MBUF_LOGGING
7076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7077 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
7082 if (appendchain == NULL) {
7085 sctp_m_freem(outchain);
7089 /* tack on to the end */
7090 if (*endofchain != NULL) {
7091 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
7095 if (SCTP_BUF_NEXT(m) == NULL) {
7096 SCTP_BUF_NEXT(m) = appendchain;
7099 m = SCTP_BUF_NEXT(m);
7103 * save off the end and update the end-chain
7108 if (SCTP_BUF_NEXT(m) == NULL) {
7112 m = SCTP_BUF_NEXT(m);
7116 /* save off the end and update the end-chain position */
7119 if (SCTP_BUF_NEXT(m) == NULL) {
7123 m = SCTP_BUF_NEXT(m);
7125 return (appendchain);
/*
 * Forward declaration of the chunk-output engine (defined later in this
 * file); needed by sctp_sendall_iterator() below.
 */
7130 sctp_med_chunk_output(struct sctp_inpcb *inp,
7131 struct sctp_tcb *stcb,
7132 struct sctp_association *asoc,
7135 int control_only, int from_where,
7136 struct timeval *now, int *now_filled, int frag_point, int so_locked);
/*
 * Per-association iterator callback for SCTP_SENDALL: deliver one copy
 * of the user's message (carried in the sctp_copy_all state at ptr) to
 * the association stcb.  Handles three cases driven by sinfo_flags:
 *  - SCTP_ABORT: prepend a "user initiated abort" cause header and
 *    abort the association with the message as the reason;
 *  - SCTP_EOF: queue the data (if any) and then drive graceful
 *    shutdown, or mark SHUTDOWN_PENDING if data is still queued;
 *  - normal: append the message via sctp_msg_append().
 * Finally decides whether to kick the output engine (suppressed by a
 * Nagle-style check when data is already in flight and little is
 * unsent).
 */
7139 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
7140 uint32_t val SCTP_UNUSED)
7142 struct sctp_copy_all *ca;
7145 int added_control = 0;
7146 int un_sent, do_chunk_output = 1;
7147 struct sctp_association *asoc;
7148 struct sctp_nets *net;
7150 ca = (struct sctp_copy_all *)ptr;
7151 if (ca->m == NULL) {
7154 if (ca->inp != inp) {
/* Each association gets its own copy of the message chain. */
7158 if (ca->sndlen > 0) {
7159 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7161 /* can't copy so we are done */
7165 #ifdef SCTP_MBUF_LOGGING
7166 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7167 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
7173 SCTP_TCB_LOCK_ASSERT(stcb);
7174 if (stcb->asoc.alternate) {
7175 net = stcb->asoc.alternate;
7177 net = stcb->asoc.primary_destination;
7179 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7180 /* Abort this assoc with m as the user defined reason */
7182 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7184 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
7185 0, M_NOWAIT, 1, MT_DATA);
7186 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
7189 struct sctp_paramhdr *ph;
7191 ph = mtod(m, struct sctp_paramhdr *);
7192 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7193 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
7195 /* We add one here to keep the assoc from
7196 * dis-appearing on us.
7198 atomic_add_int(&stcb->asoc.refcnt, 1);
7199 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
7200 /* sctp_abort_an_association calls sctp_free_asoc()
7201 * free association will NOT free it since we
7202 * incremented the refcnt .. we do this to prevent
7203 * it being freed and things getting tricky since
7204 * we could end up (from free_asoc) calling inpcb_free
7205 * which would get a recursive lock call to the
7206 * iterator lock.. But as a consequence of that the
7207 * stcb will return to us un-locked.. since free_asoc
7208 * returns with either no TCB or the TCB unlocked, we
7209 * must relock.. to unlock in the iterator timer :-0
7211 SCTP_TCB_LOCK(stcb);
7212 atomic_add_int(&stcb->asoc.refcnt, -1);
7213 goto no_chunk_output;
7216 ret = sctp_msg_append(stcb, net, m,
7220 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7221 /* shutdown this assoc */
7222 if (TAILQ_EMPTY(&asoc->send_queue) &&
7223 TAILQ_EMPTY(&asoc->sent_queue) &&
7224 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
7225 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7228 /* there is nothing queued to send, so I'm done... */
7229 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7230 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7231 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7232 /* only send SHUTDOWN the first time through */
7233 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
7234 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7236 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
7237 sctp_stop_timers_for_shutdown(stcb);
7238 sctp_send_shutdown(stcb, net);
7239 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7241 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7244 do_chunk_output = 0;
7248 * we still got (or just got) data to send, so set
7252 * XXX sockets draft says that SCTP_EOF should be
7253 * sent with no data. currently, we will allow user
7254 * data to be sent first and move to
7257 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7258 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7259 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7260 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7261 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
7263 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7264 if (TAILQ_EMPTY(&asoc->send_queue) &&
7265 TAILQ_EMPTY(&asoc->sent_queue) &&
7266 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7267 struct mbuf *op_err;
7268 char msg[SCTP_DIAG_INFO_LEN];
/* Partial message left with nothing else queued: abort with a diagnostic cause. */
7271 SCTP_SNPRINTF(msg, sizeof(msg),
7272 "%s:%d at %s", __FILE__, __LINE__, __func__);
7273 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
7275 atomic_add_int(&stcb->asoc.refcnt, 1);
7276 sctp_abort_an_association(stcb->sctp_ep, stcb,
7277 op_err, SCTP_SO_NOT_LOCKED);
7278 atomic_add_int(&stcb->asoc.refcnt, -1);
7279 goto no_chunk_output;
7281 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
/* Estimate unsent bytes including per-chunk overhead for the Nagle-style check below. */
7288 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7289 (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
7291 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7292 (stcb->asoc.total_flight > 0) &&
7293 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7294 do_chunk_output = 0;
7296 if (do_chunk_output)
7297 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7298 else if (added_control) {
7299 int num_out, reason, now_filled = 0;
7303 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7304 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7305 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
/*
 * Iterator-completion callback for SCTP_SENDALL: clear the
 * SND_ITERATOR_UP flag so a new sendall may start, then free the
 * message copy and the sctp_copy_all state allocated by sctp_sendall().
 */
7316 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7318 struct sctp_copy_all *ca;
7320 ca = (struct sctp_copy_all *)ptr;
7322 * Do a notify here? Kacheong suggests that the notify be done at
7323 * the send time.. so you would push up a notification if any send
7324 * failed. Don't know if this is feasible since the only failures we
7325 * have is "memory" related and if you cannot get an mbuf to send
7326 * the data you surely can't get an mbuf to send up to notify the
7327 * user you can't send the data :->
7330 /* now free everything */
7332 /* Lets clear the flag to allow others to run. */
7333 ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7335 sctp_m_freem(ca->m);
7336 SCTP_FREE(ca, SCTP_M_COPYAL);
/*
 * Copy len bytes of user data from uio into a freshly allocated mbuf
 * chain (cluster-sized first mbuf, then as-needed followers) and return
 * the chain head.  Uses blocking (M_WAITOK) allocations.
 * NOTE(review): the error-handling and return lines fall on original
 * lines not visible in this view; presumably the chain is freed and
 * NULL returned on uiomove/allocation failure — confirm in full file.
 */
7339 static struct mbuf *
7340 sctp_copy_out_all(struct uio *uio, ssize_t len)
7342 struct mbuf *ret, *at;
7343 ssize_t left, willcpy, cancpy, error;
7345 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7351 SCTP_BUF_LEN(ret) = 0;
7352 /* save space for the data chunk header */
7353 cancpy = (int)M_TRAILINGSPACE(ret);
7354 willcpy = min(cancpy, left);
7357 /* Align data to the end */
7358 error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
7364 SCTP_BUF_LEN(at) = (int)willcpy;
7365 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
/* More data remains: allocate the next mbuf and advance the cursor. */
7368 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
7369 if (SCTP_BUF_NEXT(at) == NULL) {
7372 at = SCTP_BUF_NEXT(at);
7373 SCTP_BUF_LEN(at) = 0;
7374 cancpy = (int)M_TRAILINGSPACE(at);
7375 willcpy = min(cancpy, left);
/*
 * Implement SCTP_SENDALL: send one message to every association on a
 * one-to-many endpoint.  Allocates a sctp_copy_all control record,
 * captures the send parameters (with SCTP_SENDALL stripped so per-assoc
 * sends don't recurse) and the message data (copied from uio, or taken
 * from the caller-supplied mbuf chain m), then kicks off the PCB
 * iterator with sctp_sendall_iterator()/sctp_sendall_completes().
 * Only one sendall may be in flight per endpoint (SND_ITERATOR_UP
 * flag); the message size is bounded by the sctp_sendall_limit sysctl.
 */
7382 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7383 struct sctp_sndrcvinfo *srcv)
7386 struct sctp_copy_all *ca;
7388 if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
7389 /* There is another. */
7392 #if defined(__APPLE__) && !defined(__Userspace__)
7393 #if defined(APPLE_LEOPARD)
7394 if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7396 if (uio_resid(uio) > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7399 if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7401 /* You must not be larger than the limit! */
7404 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7408 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7411 memset(ca, 0, sizeof(struct sctp_copy_all));
7415 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7418 * take off the sendall flag, it would be bad if we failed to do
7421 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7422 /* get length and mbuf chain */
7424 #if defined(__APPLE__) && !defined(__Userspace__)
7425 #if defined(APPLE_LEOPARD)
7426 ca->sndlen = uio->uio_resid;
7428 ca->sndlen = uio_resid(uio);
7431 ca->sndlen = uio->uio_resid;
/* Apple: drop the socket lock around the (possibly sleeping) copy-in. */
7433 #if defined(__APPLE__) && !defined(__Userspace__)
7434 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7436 ca->m = sctp_copy_out_all(uio, ca->sndlen);
7437 #if defined(__APPLE__) && !defined(__Userspace__)
7438 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7440 if (ca->m == NULL) {
7441 SCTP_FREE(ca, SCTP_M_COPYAL);
7442 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7446 /* Gather the length of the send */
7450 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7451 ca->sndlen += SCTP_BUF_LEN(mat);
7454 inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7455 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7456 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7457 SCTP_ASOC_ANY_STATE,
7459 sctp_sendall_completes, inp, 1);
/* Iterator failed to start: roll back the flag and free everything. */
7461 inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7462 SCTP_FREE(ca, SCTP_M_COPYAL);
7463 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
/*
 * Remove and free every COOKIE_ECHO chunk still sitting on the
 * control-send queue (stale cookies superseded by a newer one),
 * adjusting ctrl_queue_cnt accordingly.
 */
7471 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7473 struct sctp_tmit_chunk *chk, *nchk;
7475 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7476 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7477 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7478 asoc->ctrl_queue_cnt--;
7480 sctp_m_freem(chk->data);
7483 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * Remove and free ASCONF chunks on the asconf-send queue whose serial
 * number has already been acknowledged (i.e. not greater than
 * asconf_seq_out_acked), adjusting ctrl_queue_cnt.
 */
7489 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7491 struct sctp_association *asoc;
7492 struct sctp_tmit_chunk *chk, *nchk;
7493 struct sctp_asconf_chunk *acp;
7496 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7497 /* find SCTP_ASCONF chunk in queue */
7498 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7500 acp = mtod(chk->data, struct sctp_asconf_chunk *);
/* Serial numbers still beyond the acked point must be kept for (re)transmission. */
7501 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7506 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7507 asoc->ctrl_queue_cnt--;
7509 sctp_m_freem(chk->data);
7512 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * Post-transmit bookkeeping for the bundle_at DATA chunks in data_list
 * that were just sent to net: move each from the send queue to the
 * sent queue (TSN-ordered insertion), record send time/cwnd, take a
 * whoTo reference if unset, mark as SCTP_DATAGRAM_SENT, grow flight
 * size, shrink the peer's advertised rwnd (with SWS-avoidance floor),
 * and finally notify the congestion-control module that a packet was
 * transmitted.
 */
7519 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7520 struct sctp_association *asoc,
7521 struct sctp_tmit_chunk **data_list,
7523 struct sctp_nets *net)
7526 struct sctp_tmit_chunk *tp1;
7528 for (i = 0; i < bundle_at; i++) {
7529 /* off of the send queue */
7530 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7531 asoc->send_queue_cnt--;
7534 * Any chunk NOT 0 you zap the time chunk 0 gets
7535 * zapped or set based on if a RTO measurment is
7538 data_list[i]->do_rtt = 0;
7541 data_list[i]->sent_rcv_time = net->last_sent_time;
7542 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7543 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
7544 if (data_list[i]->whoTo == NULL) {
7545 data_list[i]->whoTo = net;
7546 atomic_add_int(&net->ref_count, 1);
7548 /* on to the sent queue */
7549 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
/* Common case: append at tail. If the tail TSN is newer, walk backwards to keep the sent queue TSN-ordered. */
7550 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7551 struct sctp_tmit_chunk *tpp;
7553 /* need to move back */
7555 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7557 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7561 if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7564 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7566 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7571 /* This does not lower until the cum-ack passes it */
7572 asoc->sent_queue_cnt++;
7573 if ((asoc->peers_rwnd <= 0) &&
7574 (asoc->total_flight == 0) &&
7576 /* Mark the chunk as being a window probe */
7577 SCTP_STAT_INCR(sctps_windowprobed);
7579 #ifdef SCTP_AUDITING_ENABLED
7580 sctp_audit_log(0xC2, 3);
7582 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7583 data_list[i]->snd_count = 1;
7584 data_list[i]->rec.data.chunk_was_revoked = 0;
7585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7586 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7587 data_list[i]->whoTo->flight_size,
7588 data_list[i]->book_size,
7589 (uint32_t)(uintptr_t)data_list[i]->whoTo,
7590 data_list[i]->rec.data.tsn);
7592 sctp_flight_size_increase(data_list[i]);
7593 sctp_total_flight_increase(stcb, data_list[i]);
7594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7595 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7596 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7598 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7599 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7600 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7601 /* SWS sender side engages */
7602 asoc->peers_rwnd = 0;
7605 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7606 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
/*
 * Sweep the control-send queue and free "stray" one-shot control
 * chunks (SACKs, heartbeats, shutdown-related, cookie-ack, CWR,
 * asconf-ack, etc.) that should not linger after an output pass.
 * FORWARD_CUM_TSN removal also decrements fwd_tsn_cnt; STREAM_RESET
 * chunks are kept only if they are the association's active str_reset
 * request.
 */
7611 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
7613 struct sctp_tmit_chunk *chk, *nchk;
7615 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7616 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7617 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7618 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7619 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7620 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7621 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7622 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7623 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7624 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7625 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7626 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7627 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7628 /* Stray chunks must be cleaned up */
7630 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7631 asoc->ctrl_queue_cnt--;
7633 sctp_m_freem(chk->data);
7636 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7637 asoc->fwd_tsn_cnt--;
7639 sctp_free_a_chunk(stcb, chk, so_locked);
7640 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7641 /* special handling, we must look into the param */
/* Only the currently-pending stream-reset request survives; others are freed. */
7642 if (chk != asoc->str_reset) {
7643 goto clean_up_anyway;
/*
 * Decide whether (and by how much) an incomplete message of `length`
 * bytes should be split to fill `space_left` bytes in the packet being
 * built.  Returns the number of bytes to take now (0 = don't split).
 * With explicit EOR (eeor_on) a whole-fitting message is preferred;
 * otherwise splitting is refused for sub-optimal residuals unless the
 * send buffer itself is smaller than the frag point, and allowed only
 * when space_left reaches min(sctp_min_split_point, frag_point).
 */
7650 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7651 uint32_t space_left, uint32_t frag_point, int eeor_on)
7653 /* Make a decision on if I should split a
7654 * msg into multiple parts. This is only asked of
7655 * incomplete messages.
7658 /* If we are doing EEOR we need to always send
7659 * it if its the entire thing, since it might
7660 * be all the guy is putting in the hopper.
7662 if (space_left >= length) {
7664 * If we have data outstanding,
7665 * we get another chance when the sack
7666 * arrives to transmit - wait for more data
7668 if (stcb->asoc.total_flight == 0) {
7669 /* If nothing is in flight, we zero
7670 * the packet counter.
7677 /* You can fill the rest */
7678 return (space_left);
7682 * For those strange folk that make the send buffer
7683 * smaller than our fragmentation point, we can't
7684 * get a full msg in so we have to allow splitting.
7686 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7689 if ((length <= space_left) ||
7690 ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7691 /* Sub-optimial residual don't split in non-eeor mode. */
7694 /* If we reach here length is larger
7695 * than the space_left. Do we wish to split
7696 * it for the sake of packet putting together?
7698 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7699 /* Its ok to split it */
7700 return (min(space_left, frag_point));
7702 /* Nope, can't split */
/*
 * Move data from one stream's pending-message queue (strq->outqueue)
 * into the association's send_queue as a freshly built DATA / I-DATA
 * chunk.  Visible parameters: stcb (TCB, lock asserted below), strq
 * (stream to pull from), space_left (room remaining in the packet)
 * and frag_point (fragmentation threshold).  The caller in
 * sctp_fill_outqueue() additionally passes &giveup, eeor_mode, &bail
 * and so_locked -- those parameter lines are elided from this excerpt.
 *
 * NOTE(review): interior lines of this function are elided in this
 * listing (the embedded original line numbers are discontinuous);
 * the comments below describe only the statements visible here and
 * should be confirmed against the full source.
 */
7707 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7708 struct sctp_stream_out *strq,
7709 uint32_t space_left,
7710 uint32_t frag_point,
7716 /* Move from the stream to the send_queue keeping track of the total */
7717 struct sctp_association *asoc;
7718 struct sctp_stream_queue_pending *sp;
7719 struct sctp_tmit_chunk *chk;
7720 struct sctp_data_chunk *dchkh=NULL;	/* header pointer for classic DATA */
7721 struct sctp_idata_chunk *ndchkh=NULL;	/* header pointer for I-DATA */
7722 uint32_t to_move, length;
7724 uint8_t rcv_flags = 0;	/* chunk flags (FIRST/LAST/UNORDERED/...) built up below */
7726 uint8_t send_lock_up = 0;	/* presumably nonzero once the TCB send lock is held -- set on elided lines */
7728 SCTP_TCB_LOCK_ASSERT(stcb);
7731 /*sa_ignore FREED_MEMORY*/
7732 sp = TAILQ_FIRST(&strq->outqueue);
/* Stream queue looked empty: take the send lock and re-check under it. */
7734 if (send_lock_up == 0) {
7735 SCTP_TCB_SEND_LOCK(stcb);
7738 sp = TAILQ_FIRST(&strq->outqueue);
/*
 * Still empty.  In non-explicit-EOR, non-I-DATA mode the stream must
 * not claim an incomplete last message; complain and clear the flag.
 */
7742 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7743 (stcb->asoc.idata_supported == 0) &&
7744 (strq->last_msg_incomplete)) {
7745 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7747 strq->last_msg_incomplete);
7748 strq->last_msg_incomplete = 0;
7752 SCTP_TCB_SEND_UNLOCK(stcb);
/*
 * A complete message with no bytes left: either finish the deferred
 * cleanup (sender_all_done set) or leave it for the sender to finish.
 */
7757 if ((sp->msg_is_complete) && (sp->length == 0)) {
7758 if (sp->sender_all_done) {
7759 /* We are doing deferred cleanup. Last
7760 * time through when we took all the data
7761 * the sender_all_done was not set.
7763 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7764 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7765 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7766 sp->sender_all_done,
7768 sp->msg_is_complete,
/* Unlinking the last queue entry requires the send lock. */
7772 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7773 SCTP_TCB_SEND_LOCK(stcb);
/* Unlink sp from the stream and notify the stream scheduler. */
7776 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7777 TAILQ_REMOVE(&strq->outqueue, sp, next);
7778 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
/* A pending stream reset may trigger once this stream fully drains. */
7779 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7780 (strq->chunks_on_queues == 0) &&
7781 TAILQ_EMPTY(&strq->outqueue)) {
7782 stcb->asoc.trigger_reset = 1;
7785 sctp_free_remote_addr(sp->net);
7789 sctp_m_freem(sp->data);
7792 sctp_free_a_strmoq(stcb, sp, so_locked);
7793 /* we can't be locked to it */
7795 SCTP_TCB_SEND_UNLOCK(stcb);
7798 /* back to get the next msg */
7801 /* sender just finished this but
7802 * still holds a reference
7809 /* is there some to get */
7810 if (sp->length == 0) {
7815 } else if (sp->discard_rest) {
/* The user asked to drop the remainder of this message. */
7816 if (send_lock_up == 0) {
7817 SCTP_TCB_SEND_LOCK(stcb);
7820 /* Whack down the size */
7821 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
/* On TCP-model sockets, also credit the socket send buffer back. */
7822 if ((stcb->sctp_socket != NULL) &&
7823 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7824 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7825 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7828 sctp_m_freem(sp->data);
7830 sp->tail_mbuf = NULL;
7839 some_taken = sp->some_taken;
/* Decide how many bytes to move and which fragment flags apply. */
7841 length = sp->length;
7842 if (sp->msg_is_complete) {
7843 /* The message is complete */
7844 to_move = min(length, frag_point);
7845 if (to_move == length) {
7846 /* All of it fits in the MTU */
7847 if (sp->some_taken) {
7848 rcv_flags |= SCTP_DATA_LAST_FRAG;
7850 rcv_flags |= SCTP_DATA_NOT_FRAG;
7852 sp->put_last_out = 1;
7853 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7854 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7857 /* Not all of it fits, we fragment */
7858 if (sp->some_taken == 0) {
7859 rcv_flags |= SCTP_DATA_FIRST_FRAG;
/* Incomplete message: ask the split policy how much we may take. */
7864 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7867 * We use a snapshot of length in case it
7868 * is expanding during the compare.
7873 if (to_move >= llen) {
7875 if (send_lock_up == 0) {
7877 * We are taking all of an incomplete msg
7878 * thus we need a send lock.
7880 SCTP_TCB_SEND_LOCK(stcb);
7882 if (sp->msg_is_complete) {
7883 /* the sender finished the msg */
7888 if (sp->some_taken == 0) {
7889 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7893 /* Nothing to take. */
7900 /* If we reach here, we can copy out a chunk */
7901 sctp_alloc_a_chunk(stcb, chk);
7903 /* No chunk memory */
7908 /* Setup for unordered if needed by looking
7909 * at the user sent info flags.
7911 if (sp->sinfo_flags & SCTP_UNORDERED) {
7912 rcv_flags |= SCTP_DATA_UNORDERED;
7914 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7915 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7916 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7918 /* clear out the chunk before setting up */
7919 memset(chk, 0, sizeof(*chk));
7920 chk->rec.data.rcv_flags = rcv_flags;
/* Either steal the whole mbuf chain or copy out to_move bytes. */
7922 if (to_move >= length) {
7923 /* we think we can steal the whole thing */
7924 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7925 SCTP_TCB_SEND_LOCK(stcb);
7928 if (to_move < sp->length) {
7929 /* bail, it changed */
7932 chk->data = sp->data;
7933 chk->last_mbuf = sp->tail_mbuf;
7934 /* register the stealing */
7935 sp->data = sp->tail_mbuf = NULL;
/* Partial take: copy to_move bytes out of sp->data. */
7939 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7940 chk->last_mbuf = NULL;
7941 if (chk->data == NULL) {
7942 sp->some_taken = some_taken;	/* restore flag on copy failure */
7943 sctp_free_a_chunk(stcb, chk, so_locked);
7948 #ifdef SCTP_MBUF_LOGGING
7949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7950 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7953 /* Pull off the data */
7954 m_adj(sp->data, to_move);
7955 /* Now lets work our way down and compact it */
/* Free leading zero-length mbufs left behind by m_adj(). */
7957 while (m && (SCTP_BUF_LEN(m) == 0)) {
7958 sp->data = SCTP_BUF_NEXT(m);
7959 SCTP_BUF_NEXT(m) = NULL;
7960 if (sp->tail_mbuf == m) {
7962 * Freeing tail? TSNH since
7963 * we supposedly were taking less
7964 * than the sp->length.
7967 panic("Huh, freing tail? - TSNH");
7969 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7970 sp->tail_mbuf = sp->data = NULL;
7979 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7980 chk->copy_by_ref = 1;
7982 chk->copy_by_ref = 0;
7984 /* get last_mbuf and counts of mb usage
7985 * This is ugly but hopefully its only one mbuf.
7987 if (chk->last_mbuf == NULL) {
7988 chk->last_mbuf = chk->data;
7989 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7990 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7994 if (to_move > length) {
7995 /*- This should not happen either
7996 * since we always lower to_move to the size
7997 * of sp->length if its larger.
8000 panic("Huh, how can to_move be larger?");
8002 SCTP_PRINTF("Huh, how can to_move be larger?\n");
8006 atomic_subtract_int(&sp->length, to_move);
/* Ensure room in front of the payload for the chunk header. */
8008 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
8009 if (M_LEADINGSPACE(chk->data) < leading) {
8010 /* Not enough room for a chunk header, get some */
8013 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
8016 * we're in trouble here. _PREPEND below will free
8017 * all the data if there is no leading space, so we
8018 * must put the data back and restore.
8020 if (send_lock_up == 0) {
8021 SCTP_TCB_SEND_LOCK(stcb);
8024 if (sp->data == NULL) {
8025 /* unsteal the data */
8026 sp->data = chk->data;
8027 sp->tail_mbuf = chk->last_mbuf;
8030 /* reassemble the data */
8032 sp->data = chk->data;
8033 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
/* Roll back the accounting done above, then drop the chunk. */
8035 sp->some_taken = some_taken;
8036 atomic_add_int(&sp->length, to_move);
8039 sctp_free_a_chunk(stcb, chk, so_locked);
/* Got a fresh header mbuf: chain it in front of the data. */
8043 SCTP_BUF_LEN(m) = 0;
8044 SCTP_BUF_NEXT(m) = chk->data;
8046 M_ALIGN(chk->data, 4);
8049 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
8050 if (chk->data == NULL) {
8051 /* HELP, TSNH since we assured it would not above? */
8053 panic("prepend failes HELP?");
8055 SCTP_PRINTF("prepend fails HELP?\n");
8056 sctp_free_a_chunk(stcb, chk, so_locked);
/* Account the header bytes and size/book the chunk. */
8062 sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
8063 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
8064 chk->book_size_scale = 0;
8065 chk->sent = SCTP_DATAGRAM_UNSENT;
8068 chk->asoc = &stcb->asoc;
8069 chk->pad_inplace = 0;
8070 chk->no_fr_allowed = 0;
/*
 * Assign the message identifier.  Classic DATA: ordered messages use
 * the stream SSN counter, unordered use 0 (receiver ignores it).
 * I-DATA: separate MID counters for ordered and unordered; each
 * counter advances only on the LAST fragment of a message.
 */
8071 if (stcb->asoc.idata_supported == 0) {
8072 if (rcv_flags & SCTP_DATA_UNORDERED) {
8073 /* Just use 0. The receiver ignores the values. */
8074 chk->rec.data.mid = 0;
8076 chk->rec.data.mid = strq->next_mid_ordered;
8077 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8078 strq->next_mid_ordered++;
8082 if (rcv_flags & SCTP_DATA_UNORDERED) {
8083 chk->rec.data.mid = strq->next_mid_unordered;
8084 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8085 strq->next_mid_unordered++;
8088 chk->rec.data.mid = strq->next_mid_ordered;
8089 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8090 strq->next_mid_ordered++;
/* Copy per-message metadata from sp into the chunk record. */
8094 chk->rec.data.sid = sp->sid;
8095 chk->rec.data.ppid = sp->ppid;
8096 chk->rec.data.context = sp->context;
8097 chk->rec.data.doing_fast_retransmit = 0;
8099 chk->rec.data.timetodrop = sp->ts;
8100 chk->flags = sp->act_flags;
8103 chk->whoTo = sp->net;
8104 atomic_add_int(&chk->whoTo->ref_count, 1);
/* Carry the AUTH key reference over to the chunk, if sp holds one. */
8108 if (sp->holds_key_ref) {
8109 chk->auth_keyid = sp->auth_keyid;
8110 sctp_auth_key_acquire(stcb, chk->auth_keyid);
8111 chk->holds_key_ref = 1;
/* Allocate the TSN (atomic fetch-add on FreeBSD kernel builds). */
8113 #if defined(__FreeBSD__) && !defined(__Userspace__)
8114 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
8116 chk->rec.data.tsn = asoc->sending_seq++;
8118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
8119 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
8120 (uint32_t)(uintptr_t)stcb, sp->length,
8121 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
/* Point the appropriate header struct at the start of chk->data. */
8124 if (stcb->asoc.idata_supported == 0) {
8125 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8127 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
8130 * Put the rest of the things in place now. Size was done
8131 * earlier in previous loop prior to padding.
8134 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional debug-only ring log of outgoing TSNs. */
8135 SCTP_TCB_LOCK_ASSERT(stcb);
8136 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
8137 asoc->tsn_out_at = 0;
8138 asoc->tsn_out_wrapped = 1;
8140 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
8141 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
8142 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
8143 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
8144 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
8145 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
8146 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
8147 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
/* Fill in the on-the-wire header (DATA vs I-DATA layouts differ). */
8150 if (stcb->asoc.idata_supported == 0) {
8151 dchkh->ch.chunk_type = SCTP_DATA;
8152 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8153 dchkh->dp.tsn = htonl(chk->rec.data.tsn);
8154 dchkh->dp.sid = htons(strq->sid);
8155 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
8156 dchkh->dp.ppid = chk->rec.data.ppid;
8157 dchkh->ch.chunk_length = htons(chk->send_size);
8159 ndchkh->ch.chunk_type = SCTP_IDATA;
8160 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8161 ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
8162 ndchkh->dp.sid = htons(strq->sid);
8163 ndchkh->dp.reserved = htons(0);
8164 ndchkh->dp.mid = htonl(chk->rec.data.mid);
8166 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
8168 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
8170 ndchkh->ch.chunk_length = htons(chk->send_size);
8172 /* Now advance the chk->send_size by the actual pad needed. */
8173 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
8178 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
8179 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
8181 chk->last_mbuf = lm;
8182 chk->pad_inplace = 1;
8184 chk->send_size += pads;
8186 if (PR_SCTP_ENABLED(chk->flags)) {
8187 asoc->pr_sctp_cnt++;
/* Message fully consumed and sender done: unlink and free sp now. */
8189 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
8190 /* All done pull and kill the message */
8191 if (sp->put_last_out == 0) {
8192 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
8193 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
8194 sp->sender_all_done,
8196 sp->msg_is_complete,
8200 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
8201 SCTP_TCB_SEND_LOCK(stcb);
8204 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
8205 TAILQ_REMOVE(&strq->outqueue, sp, next);
8206 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
/* As above: a drained stream may trigger a pending stream reset. */
8207 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
8208 (strq->chunks_on_queues == 0) &&
8209 TAILQ_EMPTY(&strq->outqueue)) {
8210 stcb->asoc.trigger_reset = 1;
8213 sctp_free_remote_addr(sp->net);
8217 sctp_m_freem(sp->data);
8220 sctp_free_a_strmoq(stcb, sp, so_locked);
/* Finally enqueue the new chunk on the association's send queue. */
8222 asoc->chunks_on_out_queue++;
8223 strq->chunks_on_queues++;
8224 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8225 asoc->send_queue_cnt++;
8228 SCTP_TCB_SEND_UNLOCK(stcb);
/*
 * Fill the association's send_queue with data destined for `net`:
 * compute the usable payload space of one packet from net->mtu, then
 * repeatedly ask the stream scheduler for a stream and drain it via
 * sctp_move_to_outqueue() until space runs out, the scheduler returns
 * no stream, or a move reports giveup/bail.
 *
 * NOTE(review): interior lines are elided in this listing (embedded
 * original line numbers are discontinuous); comments describe only
 * the visible statements.
 */
8235 sctp_fill_outqueue(struct sctp_tcb *stcb,
8236 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked)
8238 struct sctp_association *asoc;
8239 struct sctp_stream_out *strq;
8240 uint32_t space_left, moved, total_moved;
8243 SCTP_TCB_LOCK_ASSERT(stcb);
/* Per-address-family IP/SCTP header overhead determines space_left. */
8246 switch (net->ro._l_addr.sa.sa_family) {
8249 space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
8254 space_left = net->mtu - SCTP_MIN_OVERHEAD;
8257 #if defined(__Userspace__)
/* Userspace transport: only the SCTP common header is subtracted. */
8259 space_left = net->mtu - sizeof(struct sctphdr);
8264 space_left = net->mtu;
8267 /* Need an allowance for the data chunk header too */
8268 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8270 /* must make even word boundary */
8271 space_left &= 0xfffffffc;
8272 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
/* Keep moving stream data until the packet is full or streams run dry. */
8275 while ((space_left > 0) && (strq != NULL)) {
8276 moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
8277 &giveup, eeor_mode, &bail, so_locked);
8278 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
8279 if ((giveup != 0) || (bail != 0)) {
8282 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8283 total_moved += moved;
/* Deduct moved bytes plus the next chunk header, keep 4-byte align. */
8284 if (space_left >= moved) {
8285 space_left -= moved;
8289 if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
8290 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8294 space_left &= 0xfffffffc;
8299 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
/* Nothing moved at all: bump the appropriate "ran dry" statistic. */
8301 if (total_moved == 0) {
8302 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8303 (net == stcb->asoc.primary_destination)) {
8304 /* ran dry for primary network net */
8305 SCTP_STAT_INCR(sctps_primary_randry);
8306 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
8307 /* ran dry with CMT on */
8308 SCTP_STAT_INCR(sctps_cmt_randry);
/*
 * Walk the association's control_send_queue and mark every ECN ECHO
 * chunk as unsent, so the next output pass will (re)transmit it.
 */
8314 sctp_fix_ecn_echo(struct sctp_association *asoc)
8316 struct sctp_tmit_chunk *chk;
8318 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8319 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8320 chk->sent = SCTP_DATAGRAM_UNSENT;
/*
 * Detach queued data from destination `net`: every pending stream
 * entry (sp) and every built chunk on the send queue still pointing
 * at `net` drops its address reference.  The subsequent reassignment
 * (presumably to NULL so a destination is re-picked at send time)
 * happens on lines elided from this excerpt -- confirm against the
 * full source.
 */
8326 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8328 struct sctp_association *asoc;
8329 struct sctp_tmit_chunk *chk;
8330 struct sctp_stream_queue_pending *sp;
/* Un-pin stream-queue entries bound to this net, per stream. */
8337 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8338 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8339 if (sp->net == net) {
8340 sctp_free_remote_addr(sp->net);
/* Un-pin already-built chunks waiting on the send queue. */
8345 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8346 if (chk->whoTo == net) {
8347 sctp_free_remote_addr(chk->whoTo);
8354 sctp_med_chunk_output(struct sctp_inpcb *inp,
8355 struct sctp_tcb *stcb,
8356 struct sctp_association *asoc,
8359 int control_only, int from_where,
8360 struct timeval *now, int *now_filled, int frag_point, int so_locked)
8363 * Ok this is the generic chunk service queue. we must do the
8365 * - Service the stream queue that is next, moving any
8366 * message (note I must get a complete message i.e. FIRST/MIDDLE and
8367 * LAST to the out queue in one pass) and assigning TSN's. This
8368 * only applys though if the peer does not support NDATA. For NDATA
8369 * chunks its ok to not send the entire message ;-)
8370 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
8371 * fomulate and send the low level chunks. Making sure to combine
8372 * any control in the control chunk queue also.
8374 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8375 struct mbuf *outchain, *endoutchain;
8376 struct sctp_tmit_chunk *chk, *nchk;
8378 /* temp arrays for unlinking */
8379 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8380 int no_fragmentflg, error;
8381 unsigned int max_rwnd_per_dest, max_send_per_dest;
8382 int one_chunk, hbflag, skip_data_for_this_net;
8383 int asconf, cookie, no_out_cnt;
8384 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8385 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8387 uint32_t auth_offset;
8388 struct sctp_auth_chunk *auth;
8389 uint16_t auth_keyid;
8390 int override_ok = 1;
8391 int skip_fill_up = 0;
8392 int data_auth_reqd = 0;
8393 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8397 #if defined(__APPLE__) && !defined(__Userspace__)
8399 sctp_lock_assert(SCTP_INP_SO(inp));
8401 sctp_unlock_assert(SCTP_INP_SO(inp));
8406 auth_keyid = stcb->asoc.authinfo.active_keyid;
8407 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8408 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8409 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8414 ctl_cnt = no_out_cnt = asconf = cookie = 0;
8416 * First lets prime the pump. For each destination, if there is room
8417 * in the flight size, attempt to pull an MTU's worth out of the
8418 * stream queues into the general send_queue
8420 #ifdef SCTP_AUDITING_ENABLED
8421 sctp_audit_log(0xC2, 2);
8423 SCTP_TCB_LOCK_ASSERT(stcb);
8430 /* Nothing to possible to send? */
8431 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8432 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8433 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8434 TAILQ_EMPTY(&asoc->send_queue) &&
8435 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
8440 if (asoc->peers_rwnd == 0) {
8441 /* No room in peers rwnd */
8443 if (asoc->total_flight > 0) {
8444 /* we are allowed one chunk in flight */
8448 if (stcb->asoc.ecn_echo_cnt_onq) {
8449 /* Record where a sack goes, if any */
8450 if (no_data_chunks &&
8451 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8452 /* Nothing but ECNe to send - we don't do that */
8453 goto nothing_to_send;
8455 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8456 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8457 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8458 sack_goes_to = chk->whoTo;
8463 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8464 if (stcb->sctp_socket)
8465 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8467 max_send_per_dest = 0;
8468 if (no_data_chunks == 0) {
8469 /* How many non-directed chunks are there? */
8470 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8471 if (chk->whoTo == NULL) {
8472 /* We already have non-directed
8473 * chunks on the queue, no need
8482 if ((no_data_chunks == 0) &&
8483 (skip_fill_up == 0) &&
8484 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8485 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8487 * This for loop we are in takes in
8488 * each net, if its's got space in cwnd and
8489 * has data sent to it (when CMT is off) then it
8490 * calls sctp_fill_outqueue for the net. This gets
8491 * data on the send queue for that network.
8493 * In sctp_fill_outqueue TSN's are assigned and
8494 * data is copied out of the stream buffers. Note
8495 * mostly copy by reference (we hope).
8497 net->window_probe = 0;
8498 if ((net != stcb->asoc.alternate) &&
8499 ((net->dest_state & SCTP_ADDR_PF) ||
8500 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8501 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8503 sctp_log_cwnd(stcb, net, 1,
8504 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8508 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8509 (net->flight_size == 0)) {
8510 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8512 if (net->flight_size >= net->cwnd) {
8513 /* skip this network, no room - can't fill */
8514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8515 sctp_log_cwnd(stcb, net, 3,
8516 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8521 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8523 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8525 /* memory alloc failure */
8531 /* now service each destination and send out what we can for it */
8532 /* Nothing to send? */
8533 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8534 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8535 TAILQ_EMPTY(&asoc->send_queue)) {
8540 if (asoc->sctp_cmt_on_off > 0) {
8541 /* get the last start point */
8542 start_at = asoc->last_net_cmt_send_started;
8543 if (start_at == NULL) {
8544 /* null so to beginning */
8545 start_at = TAILQ_FIRST(&asoc->nets);
8547 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8548 if (start_at == NULL) {
8549 start_at = TAILQ_FIRST(&asoc->nets);
8552 asoc->last_net_cmt_send_started = start_at;
8554 start_at = TAILQ_FIRST(&asoc->nets);
8556 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8557 if (chk->whoTo == NULL) {
8558 if (asoc->alternate) {
8559 chk->whoTo = asoc->alternate;
8561 chk->whoTo = asoc->primary_destination;
8563 atomic_add_int(&chk->whoTo->ref_count, 1);
8566 old_start_at = NULL;
8567 again_one_more_time:
8568 for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8569 /* how much can we send? */
8570 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8571 if (old_start_at && (old_start_at == net)) {
8572 /* through list ocmpletely. */
8576 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8577 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8578 (net->flight_size >= net->cwnd)) {
8579 /* Nothing on control or asconf and flight is full, we can skip
8580 * even in the CMT case.
8585 endoutchain = outchain = NULL;
8590 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8591 skip_data_for_this_net = 1;
8593 skip_data_for_this_net = 0;
8595 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8598 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8603 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8606 #if defined(__Userspace__)
8608 mtu = net->mtu - sizeof(struct sctphdr);
8618 if (mtu > asoc->peers_rwnd) {
8619 if (asoc->total_flight > 0) {
8620 /* We have a packet in flight somewhere */
8621 r_mtu = asoc->peers_rwnd;
8623 /* We are always allowed to send one MTU out */
8631 /************************/
8632 /* ASCONF transmission */
8633 /************************/
8634 /* Now first lets go through the asconf queue */
8635 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8636 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8639 if (chk->whoTo == NULL) {
8640 if (asoc->alternate == NULL) {
8641 if (asoc->primary_destination != net) {
8645 if (asoc->alternate != net) {
8650 if (chk->whoTo != net) {
8654 if (chk->data == NULL) {
8657 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8658 chk->sent != SCTP_DATAGRAM_RESEND) {
8662 * if no AUTH is yet included and this chunk
8663 * requires it, make sure to account for it. We
8664 * don't apply the size until the AUTH chunk is
8665 * actually added below in case there is no room for
8666 * this chunk. NOTE: we overload the use of "omtu"
8669 if ((auth == NULL) &&
8670 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8671 stcb->asoc.peer_auth_chunks)) {
8672 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8675 /* Here we do NOT factor the r_mtu */
8676 if ((chk->send_size < (int)(mtu - omtu)) ||
8677 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8679 * We probably should glom the mbuf chain
8680 * from the chk->data for control but the
8681 * problem is it becomes yet one more level
8682 * of tracking to do if for some reason
8683 * output fails. Then I have got to
8684 * reconstruct the merged control chain.. el
8685 * yucko.. for now we take the easy way and
8689 * Add an AUTH chunk, if chunk requires it
8690 * save the offset into the chain for AUTH
8692 if ((auth == NULL) &&
8693 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8694 stcb->asoc.peer_auth_chunks))) {
8695 outchain = sctp_add_auth_chunk(outchain,
8700 chk->rec.chunk_id.id);
8701 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8703 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8704 (int)chk->rec.chunk_id.can_take_data,
8705 chk->send_size, chk->copy_by_ref);
8706 if (outchain == NULL) {
8708 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8711 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8712 /* update our MTU size */
8713 if (mtu > (chk->send_size + omtu))
8714 mtu -= (chk->send_size + omtu);
8717 to_out += (chk->send_size + omtu);
8718 /* Do clear IP_DF ? */
8719 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8722 if (chk->rec.chunk_id.can_take_data)
8725 * set hb flag since we can
8731 * should sysctl this: don't
8732 * bundle data with ASCONF
8733 * since it requires AUTH
8736 chk->sent = SCTP_DATAGRAM_SENT;
8737 if (chk->whoTo == NULL) {
8739 atomic_add_int(&net->ref_count, 1);
8744 * Ok we are out of room but we can
8745 * output without effecting the
8746 * flight size since this little guy
8747 * is a control only packet.
8749 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8751 * do NOT clear the asconf
8752 * flag as it is used to do
8753 * appropriate source address
8756 if (*now_filled == 0) {
8757 (void)SCTP_GETTIME_TIMEVAL(now);
8760 net->last_sent_time = *now;
8762 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8763 (struct sockaddr *)&net->ro._l_addr,
8764 outchain, auth_offset, auth,
8765 stcb->asoc.authinfo.active_keyid,
8766 no_fragmentflg, 0, asconf,
8767 inp->sctp_lport, stcb->rport,
8768 htonl(stcb->asoc.peer_vtag),
8770 #if defined(__FreeBSD__) && !defined(__Userspace__)
8774 /* error, we could not output */
8775 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8776 if (from_where == 0) {
8777 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8779 if (error == ENOBUFS) {
8780 asoc->ifp_had_enobuf = 1;
8781 SCTP_STAT_INCR(sctps_lowlevelerr);
8783 /* error, could not output */
8784 if (error == EHOSTUNREACH) {
8790 sctp_move_chunks_from_net(stcb, net);
8795 asoc->ifp_had_enobuf = 0;
8798 * increase the number we sent, if a
8799 * cookie is sent we don't tell them
8802 outchain = endoutchain = NULL;
8806 *num_out += ctl_cnt;
8807 /* recalc a clean slate and setup */
8808 switch (net->ro._l_addr.sa.sa_family) {
8811 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8816 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8819 #if defined(__Userspace__)
8821 mtu = net->mtu - sizeof(struct sctphdr);
8838 /************************/
8839 /* Control transmission */
8840 /************************/
8841 /* Now first lets go through the control queue */
8842 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8843 if ((sack_goes_to) &&
8844 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8845 (chk->whoTo != sack_goes_to)) {
8847 * if we have a sack in queue, and we are looking at an
8848 * ecn echo that is NOT queued to where the sack is going..
8850 if (chk->whoTo == net) {
8851 /* Don't transmit it to where its going (current net) */
8853 } else if (sack_goes_to == net) {
8854 /* But do transmit it to this address */
8855 goto skip_net_check;
8858 if (chk->whoTo == NULL) {
8859 if (asoc->alternate == NULL) {
8860 if (asoc->primary_destination != net) {
8864 if (asoc->alternate != net) {
8869 if (chk->whoTo != net) {
8874 if (chk->data == NULL) {
8877 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8879 * It must be unsent. Cookies and ASCONF's
8880 * hang around but there timers will force
8881 * when marked for resend.
8886 * if no AUTH is yet included and this chunk
8887 * requires it, make sure to account for it. We
8888 * don't apply the size until the AUTH chunk is
8889 * actually added below in case there is no room for
8890 * this chunk. NOTE: we overload the use of "omtu"
8893 if ((auth == NULL) &&
8894 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8895 stcb->asoc.peer_auth_chunks)) {
8896 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8899 /* Here we do NOT factor the r_mtu */
8900 if ((chk->send_size <= (int)(mtu - omtu)) ||
8901 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8903 * We probably should glom the mbuf chain
8904 * from the chk->data for control but the
8905 * problem is it becomes yet one more level
8906 * of tracking to do if for some reason
8907 * output fails. Then I have got to
8908 * reconstruct the merged control chain.. el
8909 * yucko.. for now we take the easy way and
8913 * Add an AUTH chunk, if chunk requires it
8914 * save the offset into the chain for AUTH
8916 if ((auth == NULL) &&
8917 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8918 stcb->asoc.peer_auth_chunks))) {
8919 outchain = sctp_add_auth_chunk(outchain,
8924 chk->rec.chunk_id.id);
8925 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8927 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8928 (int)chk->rec.chunk_id.can_take_data,
8929 chk->send_size, chk->copy_by_ref);
8930 if (outchain == NULL) {
8932 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8935 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8936 /* update our MTU size */
8937 if (mtu > (chk->send_size + omtu))
8938 mtu -= (chk->send_size + omtu);
8941 to_out += (chk->send_size + omtu);
8942 /* Do clear IP_DF ? */
8943 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8946 if (chk->rec.chunk_id.can_take_data)
8948 /* Mark things to be removed, if needed */
8949 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8950 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8951 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8952 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8953 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8954 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8955 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8956 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8957 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8958 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8959 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8960 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8963 /* remove these chunks at the end */
8964 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8965 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8966 /* turn off the timer */
8967 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8968 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8970 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8976 * Other chunks, since they have
8977 * timers running (i.e. COOKIE)
8978 * we just "trust" that it
8979 * gets sent or retransmitted.
8982 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8985 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8987 * Increment ecne send count here
8988 * this means we may be over-zealous in
8989 * our counting if the send fails, but its
8990 * the best place to do it (we used to do
8991 * it in the queue of the chunk, but that did
8992 * not tell how many times it was sent.
8994 SCTP_STAT_INCR(sctps_sendecne);
8996 chk->sent = SCTP_DATAGRAM_SENT;
8997 if (chk->whoTo == NULL) {
8999 atomic_add_int(&net->ref_count, 1);
9005 * Ok we are out of room but we can
9006 * output without effecting the
9007 * flight size since this little guy
9008 * is a control only packet.
9011 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
9013 * do NOT clear the asconf
9014 * flag as it is used to do
9015 * appropriate source address
9020 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9023 /* Only HB or ASCONF advances time */
9025 if (*now_filled == 0) {
9026 (void)SCTP_GETTIME_TIMEVAL(now);
9029 net->last_sent_time = *now;
9032 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9033 (struct sockaddr *)&net->ro._l_addr,
9036 stcb->asoc.authinfo.active_keyid,
9037 no_fragmentflg, 0, asconf,
9038 inp->sctp_lport, stcb->rport,
9039 htonl(stcb->asoc.peer_vtag),
9041 #if defined(__FreeBSD__) && !defined(__Userspace__)
9045 /* error, we could not output */
9046 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9047 if (from_where == 0) {
9048 SCTP_STAT_INCR(sctps_lowlevelerrusr);
9050 if (error == ENOBUFS) {
9051 asoc->ifp_had_enobuf = 1;
9052 SCTP_STAT_INCR(sctps_lowlevelerr);
9054 if (error == EHOSTUNREACH) {
9060 sctp_move_chunks_from_net(stcb, net);
9065 asoc->ifp_had_enobuf = 0;
9068 * increase the number we sent, if a
9069 * cookie is sent we don't tell them
9072 outchain = endoutchain = NULL;
9076 *num_out += ctl_cnt;
9077 /* recalc a clean slate and setup */
9078 switch (net->ro._l_addr.sa.sa_family) {
9081 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9086 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9089 #if defined(__Userspace__)
9091 mtu = net->mtu - sizeof(struct sctphdr);
9108 /* JRI: if dest is in PF state, do not send data to it */
9109 if ((asoc->sctp_cmt_on_off > 0) &&
9110 (net != stcb->asoc.alternate) &&
9111 (net->dest_state & SCTP_ADDR_PF)) {
9114 if (net->flight_size >= net->cwnd) {
9117 if ((asoc->sctp_cmt_on_off > 0) &&
9118 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
9119 (net->flight_size > max_rwnd_per_dest)) {
9123 * We need a specific accounting for the usage of the
9124 * send buffer. We also need to check the number of messages
9125 * per net. For now, this is better than nothing and it
9126 * disabled by default...
9128 if ((asoc->sctp_cmt_on_off > 0) &&
9129 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
9130 (max_send_per_dest > 0) &&
9131 (net->flight_size > max_send_per_dest)) {
9134 /*********************/
9135 /* Data transmission */
9136 /*********************/
9138 * if AUTH for DATA is required and no AUTH has been added
9139 * yet, account for this in the mtu now... if no data can be
9140 * bundled, this adjustment won't matter anyways since the
9141 * packet will be going out...
9143 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9144 stcb->asoc.peer_auth_chunks);
9145 if (data_auth_reqd && (auth == NULL)) {
9146 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9148 /* now lets add any data within the MTU constraints */
9149 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9152 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
9153 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9160 if (net->mtu > SCTP_MIN_OVERHEAD)
9161 omtu = net->mtu - SCTP_MIN_OVERHEAD;
9166 #if defined(__Userspace__)
9168 if (net->mtu > sizeof(struct sctphdr)) {
9169 omtu = net->mtu - sizeof(struct sctphdr);
9180 if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
9181 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
9182 (skip_data_for_this_net == 0)) ||
9184 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9185 if (no_data_chunks) {
9186 /* let only control go out */
9190 if (net->flight_size >= net->cwnd) {
9191 /* skip this net, no room for data */
9195 if ((chk->whoTo != NULL) &&
9196 (chk->whoTo != net)) {
9197 /* Don't send the chunk on this net */
9201 if (asoc->sctp_cmt_on_off == 0) {
9202 if ((asoc->alternate) &&
9203 (asoc->alternate != net) &&
9204 (chk->whoTo == NULL)) {
9206 } else if ((net != asoc->primary_destination) &&
9207 (asoc->alternate == NULL) &&
9208 (chk->whoTo == NULL)) {
9212 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9214 * strange, we have a chunk that is
9215 * to big for its destination and
9216 * yet no fragment ok flag.
9217 * Something went wrong when the
9218 * PMTU changed...we did not mark
9219 * this chunk for some reason?? I
9220 * will fix it here by letting IP
9221 * fragment it for now and printing
9222 * a warning. This really should not
9225 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9226 chk->send_size, mtu);
9227 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9229 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9230 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9231 struct sctp_data_chunk *dchkh;
9233 dchkh = mtod(chk->data, struct sctp_data_chunk *);
9234 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9236 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9237 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9238 /* ok we will add this one */
9241 * Add an AUTH chunk, if chunk
9242 * requires it, save the offset into
9243 * the chain for AUTH
9245 if (data_auth_reqd) {
9247 outchain = sctp_add_auth_chunk(outchain,
9253 auth_keyid = chk->auth_keyid;
9255 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9256 } else if (override_ok) {
9257 /* use this data's keyid */
9258 auth_keyid = chk->auth_keyid;
9260 } else if (auth_keyid != chk->auth_keyid) {
9261 /* different keyid, so done bundling */
9265 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9266 chk->send_size, chk->copy_by_ref);
9267 if (outchain == NULL) {
9268 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9269 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9270 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9273 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9276 /* upate our MTU size */
9277 /* Do clear IP_DF ? */
9278 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9281 /* unsigned subtraction of mtu */
9282 if (mtu > chk->send_size)
9283 mtu -= chk->send_size;
9286 /* unsigned subtraction of r_mtu */
9287 if (r_mtu > chk->send_size)
9288 r_mtu -= chk->send_size;
9292 to_out += chk->send_size;
9293 if ((to_out > mx_mtu) && no_fragmentflg) {
9295 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9297 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9301 chk->window_probe = 0;
9302 data_list[bundle_at++] = chk;
9303 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9306 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9307 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9308 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9310 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9312 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9313 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
9314 /* Count number of user msg's that were fragmented
9315 * we do this by counting when we see a LAST fragment
9318 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9320 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9321 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9322 data_list[0]->window_probe = 1;
9323 net->window_probe = 1;
9329 * Must be sent in order of the
9330 * TSN's (on a network)
9334 } /* for (chunk gather loop for this net) */
9335 } /* if asoc.state OPEN */
9337 /* Is there something to send for this destination? */
9339 /* We may need to start a control timer or two */
9341 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9344 * do NOT clear the asconf flag as it is used
9345 * to do appropriate source address selection.
9349 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9352 /* must start a send timer if data is being sent */
9353 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9355 * no timer running on this destination
9358 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9360 if (bundle_at || hbflag) {
9361 /* For data/asconf and hb set time */
9362 if (*now_filled == 0) {
9363 (void)SCTP_GETTIME_TIMEVAL(now);
9366 net->last_sent_time = *now;
9368 /* Now send it, if there is anything to send :> */
9369 if ((error = sctp_lowlevel_chunk_output(inp,
9372 (struct sockaddr *)&net->ro._l_addr,
9380 inp->sctp_lport, stcb->rport,
9381 htonl(stcb->asoc.peer_vtag),
9383 #if defined(__FreeBSD__) && !defined(__Userspace__)
9387 /* error, we could not output */
9388 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9389 if (from_where == 0) {
9390 SCTP_STAT_INCR(sctps_lowlevelerrusr);
9392 if (error == ENOBUFS) {
9393 asoc->ifp_had_enobuf = 1;
9394 SCTP_STAT_INCR(sctps_lowlevelerr);
9396 if (error == EHOSTUNREACH) {
9398 * Destination went unreachable
9401 sctp_move_chunks_from_net(stcb, net);
9405 * I add this line to be paranoid. As far as
9406 * I can tell the continue, takes us back to
9407 * the top of the for, but just to make sure
9408 * I will reset these again here.
9410 ctl_cnt = bundle_at = 0;
9411 continue; /* This takes us back to the for() for the nets. */
9413 asoc->ifp_had_enobuf = 0;
9419 *num_out += (ctl_cnt + bundle_at);
9422 /* setup for a RTO measurement */
9423 tsns_sent = data_list[0]->rec.data.tsn;
9424 /* fill time if not already filled */
9425 if (*now_filled == 0) {
9426 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9428 *now = asoc->time_last_sent;
9430 asoc->time_last_sent = *now;
9432 if (net->rto_needed) {
9433 data_list[0]->do_rtt = 1;
9434 net->rto_needed = 0;
9436 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9437 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9444 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9447 if (old_start_at == NULL) {
9448 old_start_at = start_at;
9449 start_at = TAILQ_FIRST(&asoc->nets);
9451 goto again_one_more_time;
9455 * At the end there should be no NON timed chunks hanging on this
9458 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9459 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9461 if ((*num_out == 0) && (*reason_code == 0)) {
9466 sctp_clean_up_ctl(stcb, asoc, so_locked);
/*
 * Queue an OPERATION_ERROR chunk (carrying the cause list in op_err) on
 * the association's control-chunk send queue.  Takes ownership of op_err:
 * on any failure path visible here it is freed with sctp_m_freem().
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements and closing braces between the visible lines are not
 * shown; comments below describe only what the visible code establishes.
 */
9471 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9474 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
9475 * the control chunk queue.
9477 struct sctp_chunkhdr *hdr;
9478 struct sctp_tmit_chunk *chk;
9479 struct mbuf *mat, *last_mbuf;
9480 uint32_t chunk_length;
9481 uint16_t padding_length;
9483 SCTP_TCB_LOCK_ASSERT(stcb);
/* Make room for the chunk header in front of the error cause(s). */
9484 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9485 if (op_err == NULL) {
/* Walk the chain to total the chunk length and find the last mbuf
 * (needed below for appending padding). */
9490 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9491 chunk_length += SCTP_BUF_LEN(mat);
9492 if (SCTP_BUF_NEXT(mat) == NULL) {
/* Reject chunks larger than the protocol allows. */
9496 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
9497 sctp_m_freem(op_err);
/* SCTP chunks are padded to a 4-byte boundary. */
9500 padding_length = chunk_length % 4;
9501 if (padding_length != 0) {
9502 padding_length = 4 - padding_length;
9504 if (padding_length != 0) {
9505 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
9506 sctp_m_freem(op_err);
/* Allocate the transmit-chunk descriptor that wraps the mbuf chain. */
9510 sctp_alloc_a_chunk(stcb, chk)
9513 sctp_m_freem(op_err);
9516 chk->copy_by_ref = 0;
9517 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9518 chk->rec.chunk_id.can_take_data = 0;
/* send_size is the unpadded chunk length; padding is already in the mbufs. */
9520 chk->send_size = (uint16_t)chunk_length;
9521 chk->sent = SCTP_DATAGRAM_UNSENT;
9523 chk->asoc = &stcb->asoc;
/* Fill in the prepended chunk header. */
9526 hdr = mtod(op_err, struct sctp_chunkhdr *);
9527 hdr->chunk_type = SCTP_OPERATION_ERROR;
9528 hdr->chunk_flags = 0;
9529 hdr->chunk_length = htons(chk->send_size);
9530 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9531 chk->asoc->ctrl_queue_cnt++;
/*
 * Locate the STATE_COOKIE parameter inside a received INIT-ACK (mbuf
 * chain m, parameters starting after the INIT chunk at 'offset',
 * bounded by 'limit'), copy it out, convert it into a COOKIE_ECHO
 * chunk, and queue that chunk at the FRONT of the control send queue
 * so it is transmitted before any other pending control chunk.
 * NOTE(review): this listing is elided (source line numbers jump); the
 * error-return statements between the visible lines are not shown.
 *
 * Fix: line 9556 contained the mojibake "¶m" — a mis-decoded "&param"
 * (the "&para" of "&param" was rendered as the paragraph sign).  The
 * argument is restored to &param, matching the sctp_get_next_param()
 * contract (destination buffer pointer plus its size).
 */
9535 sctp_send_cookie_echo(struct mbuf *m,
9536 int offset, int limit,
9537 struct sctp_tcb *stcb,
9538 struct sctp_nets *net)
9541 * pull out the cookie and put it at the front of the control chunk
9545 struct mbuf *cookie;
9546 struct sctp_paramhdr param, *phdr;
9547 struct sctp_chunkhdr *hdr;
9548 struct sctp_tmit_chunk *chk;
9549 uint16_t ptype, plen;
9551 SCTP_TCB_LOCK_ASSERT(stcb);
9552 /* First find the cookie in the param area */
9554 at = offset + sizeof(struct sctp_init_chunk);
9556 phdr = sctp_get_next_param(m, at, &param, sizeof(param));
9560 ptype = ntohs(phdr->param_type);
9561 plen = ntohs(phdr->param_length);
/* A parameter shorter than its own header is malformed. */
9562 if (plen < sizeof(struct sctp_paramhdr)) {
9565 if (ptype == SCTP_STATE_COOKIE) {
9568 /* found the cookie */
/* Guard against a cookie that claims to extend past the chunk. */
9569 if (at + plen > limit) {
9572 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9573 if (cookie == NULL) {
/* Pad the copied cookie out to a 4-byte boundary. */
9577 if ((pad = (plen % 4)) > 0) {
9581 if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9585 #ifdef SCTP_MBUF_LOGGING
9586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9587 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
/* Not the cookie: advance to the next TLV parameter. */
9592 at += SCTP_SIZE32(plen);
9594 /* ok, we got the cookie lets change it into a cookie echo chunk */
9595 /* first the change from param to cookie */
9596 hdr = mtod(cookie, struct sctp_chunkhdr *);
9597 hdr->chunk_type = SCTP_COOKIE_ECHO;
9598 hdr->chunk_flags = 0;
9599 /* get the chunk stuff now and place it in the FRONT of the queue */
9600 sctp_alloc_a_chunk(stcb, chk)
9603 sctp_m_freem(cookie);
9606 chk->copy_by_ref = 0;
9607 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9608 chk->rec.chunk_id.can_take_data = 0;
9609 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9610 chk->send_size = SCTP_SIZE32(plen);
9611 chk->sent = SCTP_DATAGRAM_UNSENT;
9613 chk->asoc = &stcb->asoc;
/* The destination net holds a reference while the chunk is queued. */
9616 atomic_add_int(&chk->whoTo->ref_count, 1);
/* HEAD insert: the cookie echo must go out before other control chunks. */
9617 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9618 chk->asoc->ctrl_queue_cnt++;
/*
 * Turn a received HEARTBEAT request (at 'offset' in mbuf chain m,
 * 'chk_length' bytes) into a HEARTBEAT_ACK and queue it on the control
 * send queue — the heartbeat info parameter is echoed back verbatim.
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements between the visible lines are not shown.
 */
9623 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9627 struct sctp_nets *net)
9630 * take a HB request and make it into a HB ack and send it.
9632 struct mbuf *outchain;
9633 struct sctp_chunkhdr *chdr;
9634 struct sctp_tmit_chunk *chk;
9637 /* must have a net pointer */
/* Copy the whole HB chunk; only the type byte needs to change. */
9640 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9641 if (outchain == NULL) {
9642 /* gak out of memory */
9645 #ifdef SCTP_MBUF_LOGGING
9646 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9647 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
/* Rewrite the chunk header: request becomes acknowledgement. */
9650 chdr = mtod(outchain, struct sctp_chunkhdr *);
9651 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9652 chdr->chunk_flags = 0;
/* Pad to a 4-byte boundary if the echoed chunk is not aligned. */
9653 if (chk_length % 4 != 0) {
9654 sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9656 sctp_alloc_a_chunk(stcb, chk)
9659 sctp_m_freem(outchain);
9662 chk->copy_by_ref = 0;
9663 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9664 chk->rec.chunk_id.can_take_data = 1;
9666 chk->send_size = chk_length;
9667 chk->sent = SCTP_DATAGRAM_UNSENT;
9669 chk->asoc = &stcb->asoc;
9670 chk->data = outchain;
/* Hold a reference on the destination net while the chunk is queued. */
9672 atomic_add_int(&chk->whoTo->ref_count, 1);
9673 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9674 chk->asoc->ctrl_queue_cnt++;
/*
 * Build a COOKIE_ACK (a bare chunk header) and queue it on the control
 * send queue, addressed back to the net the last control chunk came
 * from when that is known.
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements between the visible lines are not shown.
 */
9678 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9680 /* formulate and queue a cookie-ack back to sender */
9681 struct mbuf *cookie_ack;
9682 struct sctp_chunkhdr *hdr;
9683 struct sctp_tmit_chunk *chk;
9685 SCTP_TCB_LOCK_ASSERT(stcb);
9687 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9688 if (cookie_ack == NULL) {
/* Reserve headroom so the IP/SCTP headers can be prepended later. */
9692 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9693 sctp_alloc_a_chunk(stcb, chk)
9696 sctp_m_freem(cookie_ack);
9699 chk->copy_by_ref = 0;
9700 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9701 chk->rec.chunk_id.can_take_data = 1;
/* A COOKIE_ACK carries no payload — just the chunk header. */
9703 chk->send_size = sizeof(struct sctp_chunkhdr);
9704 chk->sent = SCTP_DATAGRAM_UNSENT;
9706 chk->asoc = &stcb->asoc;
9707 chk->data = cookie_ack;
/* Prefer replying on the net the COOKIE_ECHO arrived from. */
9708 if (chk->asoc->last_control_chunk_from != NULL) {
9709 chk->whoTo = chk->asoc->last_control_chunk_from;
9710 atomic_add_int(&chk->whoTo->ref_count, 1);
9714 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9715 hdr->chunk_type = SCTP_COOKIE_ACK;
9716 hdr->chunk_flags = 0;
9717 hdr->chunk_length = htons(chk->send_size);
9718 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9719 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9720 chk->asoc->ctrl_queue_cnt++;
/*
 * Build a SHUTDOWN_ACK chunk and queue it on the control send queue,
 * addressed to 'net'.
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements between the visible lines are not shown.
 */
9726 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9728 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9729 struct mbuf *m_shutdown_ack;
9730 struct sctp_shutdown_ack_chunk *ack_cp;
9731 struct sctp_tmit_chunk *chk;
9733 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9734 if (m_shutdown_ack == NULL) {
/* Reserve headroom for the lower-layer headers. */
9738 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9739 sctp_alloc_a_chunk(stcb, chk)
9742 sctp_m_freem(m_shutdown_ack);
9745 chk->copy_by_ref = 0;
9746 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9747 chk->rec.chunk_id.can_take_data = 1;
/* SHUTDOWN_ACK is header-only (no parameters). */
9749 chk->send_size = sizeof(struct sctp_chunkhdr);
9750 chk->sent = SCTP_DATAGRAM_UNSENT;
9752 chk->asoc = &stcb->asoc;
9753 chk->data = m_shutdown_ack;
/* Destination net is pinned while the chunk sits on the queue. */
9756 atomic_add_int(&chk->whoTo->ref_count, 1);
9758 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9759 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9760 ack_cp->ch.chunk_flags = 0;
9761 ack_cp->ch.chunk_length = htons(chk->send_size);
9762 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9763 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9764 chk->asoc->ctrl_queue_cnt++;
/*
 * Build (or reuse) a SHUTDOWN chunk carrying the current cumulative TSN
 * ack and queue it on the control send queue, addressed to 'net'.  If a
 * SHUTDOWN is already queued it is reused: retargeted, its cum-TSN
 * refreshed, and moved to the tail of the queue instead of allocating a
 * duplicate.
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements between the visible lines are not shown.
 */
9769 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9771 /* formulate and queue a SHUTDOWN to the sender */
9772 struct mbuf *m_shutdown;
9773 struct sctp_shutdown_chunk *shutdown_cp;
9774 struct sctp_tmit_chunk *chk;
/* Look for an already-queued SHUTDOWN to reuse. */
9776 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9777 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9778 /* We already have a SHUTDOWN queued. Reuse it. */
/* Drop the reference on the chunk's previous destination. */
9780 sctp_free_remote_addr(chk->whoTo);
/* No queued SHUTDOWN found: build a fresh one. */
9787 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9788 if (m_shutdown == NULL) {
9792 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9793 sctp_alloc_a_chunk(stcb, chk)
9796 sctp_m_freem(m_shutdown);
9799 chk->copy_by_ref = 0;
9800 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9801 chk->rec.chunk_id.can_take_data = 1;
9803 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9804 chk->sent = SCTP_DATAGRAM_UNSENT;
9806 chk->asoc = &stcb->asoc;
9807 chk->data = m_shutdown;
9810 atomic_add_int(&chk->whoTo->ref_count, 1);
9812 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9813 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9814 shutdown_cp->ch.chunk_flags = 0;
9815 shutdown_cp->ch.chunk_length = htons(chk->send_size);
/* SHUTDOWN carries the cumulative TSN acked so far. */
9816 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9817 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9818 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9819 chk->asoc->ctrl_queue_cnt++;
/* Reuse path: re-queue the existing chunk at the tail with a fresh
 * destination and an up-to-date cumulative TSN. */
9821 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9824 atomic_add_int(&chk->whoTo->ref_count, 1);
9826 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9827 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9828 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
/*
 * Compose an ASCONF chunk from the parameters queued on the association
 * and place it on the ASCONF send queue.  Unless the peer supports
 * multiple outstanding ASCONFs, a new one is not queued while another
 * is still in flight.
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements between the visible lines are not shown.
 */
9834 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9837 * formulate and queue an ASCONF to the peer.
9838 * ASCONF parameters should be queued on the assoc queue.
9840 struct sctp_tmit_chunk *chk;
9841 struct mbuf *m_asconf;
9844 SCTP_TCB_LOCK_ASSERT(stcb);
/* Only one ASCONF may be outstanding unless MULTIPLE_ASCONFS is on. */
9846 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9847 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9848 /* can't send a new one if there is one in flight already */
9852 /* compose an ASCONF chunk, maximum length is PMTU */
9853 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9854 if (m_asconf == NULL) {
9858 sctp_alloc_a_chunk(stcb, chk)
9861 sctp_m_freem(m_asconf);
9865 chk->copy_by_ref = 0;
9866 chk->rec.chunk_id.id = SCTP_ASCONF;
9867 chk->rec.chunk_id.can_take_data = 0;
9868 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9869 chk->data = m_asconf;
/* len was filled in by sctp_compose_asconf(). */
9870 chk->send_size = len;
9871 chk->sent = SCTP_DATAGRAM_UNSENT;
9873 chk->asoc = &stcb->asoc;
9876 atomic_add_int(&chk->whoTo->ref_count, 1);
/* ASCONFs go on their own queue, not the general control queue. */
9878 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9879 chk->asoc->ctrl_queue_cnt++;
/*
 * Queue ASCONF-ACK chunk(s) back to the peer.  The ack mbufs are cached
 * on the association (asconf_ack_sent list) so retransmissions can be
 * answered; each cached ack is copied and placed on the control send
 * queue.  Destination selection: on a retransmission an alternate net
 * is chosen; otherwise the net the ASCONF arrived from is used, falling
 * back to the alternate or primary destination.
 * NOTE(review): this listing is elided (source line numbers jump), so
 * some statements between the visible lines are not shown.
 */
9884 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9887 * formulate and queue a asconf-ack back to sender.
9888 * the asconf-ack must be stored in the tcb.
9890 struct sctp_tmit_chunk *chk;
9891 struct sctp_asconf_ack *ack, *latest_ack;
9893 struct sctp_nets *net = NULL;
9895 SCTP_TCB_LOCK_ASSERT(stcb);
9896 /* Get the latest ASCONF-ACK */
9897 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9898 if (latest_ack == NULL) {
/* Same source as last time => the peer is retransmitting; pick an
 * alternate path for the reply. */
9901 if (latest_ack->last_sent_to != NULL &&
9902 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9903 /* we're doing a retransmission */
9904 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9907 if (stcb->asoc.last_control_chunk_from == NULL) {
9908 if (stcb->asoc.alternate) {
9909 net = stcb->asoc.alternate;
9911 net = stcb->asoc.primary_destination;
9914 net = stcb->asoc.last_control_chunk_from;
/* Non-retransmission case: same fallback chain. */
9919 if (stcb->asoc.last_control_chunk_from == NULL) {
9920 if (stcb->asoc.alternate) {
9921 net = stcb->asoc.alternate;
9923 net = stcb->asoc.primary_destination;
9926 net = stcb->asoc.last_control_chunk_from;
/* Remember where this ack went, for the retransmission check above. */
9929 latest_ack->last_sent_to = net;
/* Queue every cached ack (multiple when MULTIPLE_ASCONFS is in use). */
9931 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9932 if (ack->data == NULL) {
9936 /* copy the asconf_ack */
9937 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9938 if (m_ack == NULL) {
9939 /* couldn't copy it */
9942 #ifdef SCTP_MBUF_LOGGING
9943 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9944 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9948 sctp_alloc_a_chunk(stcb, chk)
9952 sctp_m_freem(m_ack);
9955 chk->copy_by_ref = 0;
9956 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9957 chk->rec.chunk_id.can_take_data = 1;
9958 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9961 atomic_add_int(&chk->whoTo->ref_count, 1);
9964 chk->send_size = ack->len;
9965 chk->sent = SCTP_DATAGRAM_UNSENT;
9967 chk->asoc = &stcb->asoc;
9969 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9970 chk->asoc->ctrl_queue_cnt++;
9977 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9978 struct sctp_tcb *stcb,
9979 struct sctp_association *asoc,
9980 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
9983 * send out one MTU of retransmission. If fast_retransmit is
9984 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9985 * rwnd. For a Cookie or Asconf in the control chunk queue we
9986 * retransmit them by themselves.
9988 * For data chunks we will pick out the lowest TSN's in the sent_queue
9989 * marked for resend and bundle them all together (up to a MTU of
9990 * destination). The address to send to should have been
9991 * selected/changed where the retransmission was marked (i.e. in FR
9992 * or t3-timeout routines).
9994 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9995 struct sctp_tmit_chunk *chk, *fwd;
9996 struct mbuf *m, *endofchain;
9997 struct sctp_nets *net = NULL;
9998 uint32_t tsns_sent = 0;
9999 int no_fragmentflg, bundle_at, cnt_thru;
10001 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
10002 struct sctp_auth_chunk *auth = NULL;
10003 uint32_t auth_offset = 0;
10004 uint16_t auth_keyid;
10005 int override_ok = 1;
10006 int data_auth_reqd = 0;
10009 #if defined(__APPLE__) && !defined(__Userspace__)
10011 sctp_lock_assert(SCTP_INP_SO(inp));
10013 sctp_unlock_assert(SCTP_INP_SO(inp));
10016 SCTP_TCB_LOCK_ASSERT(stcb);
10017 tmr_started = ctl_cnt = bundle_at = error = 0;
10018 no_fragmentflg = 1;
10022 endofchain = m = NULL;
10023 auth_keyid = stcb->asoc.authinfo.active_keyid;
10024 #ifdef SCTP_AUDITING_ENABLED
10025 sctp_audit_log(0xC3, 1);
10027 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
10028 (TAILQ_EMPTY(&asoc->control_send_queue))) {
10029 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
10030 asoc->sent_queue_retran_cnt);
10031 asoc->sent_queue_cnt = 0;
10032 asoc->sent_queue_cnt_removeable = 0;
10033 /* send back 0/0 so we enter normal transmission */
10037 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10038 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
10039 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
10040 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
10041 if (chk->sent != SCTP_DATAGRAM_RESEND) {
10044 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
10045 if (chk != asoc->str_reset) {
10047 * not eligible for retran if its
10054 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10058 * Add an AUTH chunk, if chunk requires it save the
10059 * offset into the chain for AUTH
10061 if ((auth == NULL) &&
10062 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
10063 stcb->asoc.peer_auth_chunks))) {
10064 m = sctp_add_auth_chunk(m, &endofchain,
10065 &auth, &auth_offset,
10067 chk->rec.chunk_id.id);
10068 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10070 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10076 /* do we have control chunks to retransmit? */
10078 /* Start a timer no matter if we succeed or fail */
10079 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
10080 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
10081 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
10082 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
10083 chk->snd_count++; /* update our count */
10084 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
10085 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
10086 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
10087 no_fragmentflg, 0, 0,
10088 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10089 chk->whoTo->port, NULL,
10090 #if defined(__FreeBSD__) && !defined(__Userspace__)
10094 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10095 if (error == ENOBUFS) {
10096 asoc->ifp_had_enobuf = 1;
10097 SCTP_STAT_INCR(sctps_lowlevelerr);
10101 asoc->ifp_had_enobuf = 0;
10107 * We don't want to mark the net->sent time here since this
10108 * we use this for HB and retrans cannot measure RTT
10110 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
10112 chk->sent = SCTP_DATAGRAM_SENT;
10113 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
10114 if (fwd_tsn == 0) {
10117 /* Clean up the fwd-tsn list */
10118 sctp_clean_up_ctl(stcb, asoc, so_locked);
10123 * Ok, it is just data retransmission we need to do or that and a
10124 * fwd-tsn with it all.
10126 if (TAILQ_EMPTY(&asoc->sent_queue)) {
10127 return (SCTP_RETRAN_DONE);
10129 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
10130 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
10131 /* not yet open, resend the cookie and that is it */
10134 #ifdef SCTP_AUDITING_ENABLED
10135 sctp_auditing(20, inp, stcb, NULL);
10137 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
10138 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
10139 if (chk->sent != SCTP_DATAGRAM_RESEND) {
10140 /* No, not sent to this net or not ready for rtx */
10143 if (chk->data == NULL) {
10144 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
10145 chk->rec.data.tsn, chk->snd_count, chk->sent);
10148 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10149 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10150 struct mbuf *op_err;
10151 char msg[SCTP_DIAG_INFO_LEN];
10153 SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
10154 chk->rec.data.tsn, chk->snd_count);
10155 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
10157 atomic_add_int(&stcb->asoc.refcnt, 1);
10158 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
10160 SCTP_TCB_LOCK(stcb);
10161 atomic_subtract_int(&stcb->asoc.refcnt, 1);
10162 return (SCTP_RETRAN_EXIT);
10164 /* pick up the net */
10166 switch (net->ro._l_addr.sa.sa_family) {
10169 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10174 mtu = net->mtu - SCTP_MIN_OVERHEAD;
10177 #if defined(__Userspace__)
10179 mtu = net->mtu - sizeof(struct sctphdr);
10188 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10189 /* No room in peers rwnd */
10192 tsn = asoc->last_acked_seq + 1;
10193 if (tsn == chk->rec.data.tsn) {
10195 * we make a special exception for this
10196 * case. The peer has no rwnd but is missing
10197 * the lowest chunk.. which is probably what
10198 * is holding up the rwnd.
10200 goto one_chunk_around;
10205 if (asoc->peers_rwnd < mtu) {
10207 if ((asoc->peers_rwnd == 0) &&
10208 (asoc->total_flight == 0)) {
10209 chk->window_probe = 1;
10210 chk->whoTo->window_probe = 1;
10213 #ifdef SCTP_AUDITING_ENABLED
10214 sctp_audit_log(0xC3, 2);
10218 net->fast_retran_ip = 0;
10219 if (chk->rec.data.doing_fast_retransmit == 0) {
10221 * if no FR in progress skip destination that have
10222 * flight_size > cwnd.
10224 if (net->flight_size >= net->cwnd) {
10229 * Mark the destination net to have FR recovery
10230 * limits put on it.
10233 net->fast_retran_ip = 1;
10237 * if no AUTH is yet included and this chunk requires it,
10238 * make sure to account for it. We don't apply the size
10239 * until the AUTH chunk is actually added below in case
10240 * there is no room for this chunk.
10242 if (data_auth_reqd && (auth == NULL)) {
10243 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10247 if ((chk->send_size <= (mtu - dmtu)) ||
10248 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10249 /* ok we will add this one */
10250 if (data_auth_reqd) {
10251 if (auth == NULL) {
10252 m = sctp_add_auth_chunk(m,
10258 auth_keyid = chk->auth_keyid;
10260 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10261 } else if (override_ok) {
10262 auth_keyid = chk->auth_keyid;
10264 } else if (chk->auth_keyid != auth_keyid) {
10265 /* different keyid, so done bundling */
10269 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10271 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10274 /* Do clear IP_DF ? */
10275 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10276 no_fragmentflg = 0;
10278 /* upate our MTU size */
10279 if (mtu > (chk->send_size + dmtu))
10280 mtu -= (chk->send_size + dmtu);
10283 data_list[bundle_at++] = chk;
10284 if (one_chunk && (asoc->total_flight <= 0)) {
10285 SCTP_STAT_INCR(sctps_windowprobed);
10288 if (one_chunk == 0) {
10290 * now are there anymore forward from chk to pick
10293 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10294 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10295 /* Nope, not for retran */
10298 if (fwd->whoTo != net) {
10299 /* Nope, not the net in question */
10302 if (data_auth_reqd && (auth == NULL)) {
10303 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10306 if (fwd->send_size <= (mtu - dmtu)) {
10307 if (data_auth_reqd) {
10308 if (auth == NULL) {
10309 m = sctp_add_auth_chunk(m,
10315 auth_keyid = fwd->auth_keyid;
10317 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10318 } else if (override_ok) {
10319 auth_keyid = fwd->auth_keyid;
10321 } else if (fwd->auth_keyid != auth_keyid) {
10322 /* different keyid, so done bundling */
10326 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10328 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10331 /* Do clear IP_DF ? */
10332 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10333 no_fragmentflg = 0;
10335 /* upate our MTU size */
10336 if (mtu > (fwd->send_size + dmtu))
10337 mtu -= (fwd->send_size + dmtu);
10340 data_list[bundle_at++] = fwd;
10341 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10345 /* can't fit so we are done */
10350 /* Is there something to send for this destination? */
10353 * No matter if we fail/or succeed we should start a
10354 * timer. A failure is like a lost IP packet :-)
10356 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10358 * no timer running on this destination
10361 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10364 /* Now lets send it, if there is anything to send :> */
10365 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10366 (struct sockaddr *)&net->ro._l_addr, m,
10367 auth_offset, auth, auth_keyid,
10368 no_fragmentflg, 0, 0,
10369 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10371 #if defined(__FreeBSD__) && !defined(__Userspace__)
10375 /* error, we could not output */
10376 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10377 if (error == ENOBUFS) {
10378 asoc->ifp_had_enobuf = 1;
10379 SCTP_STAT_INCR(sctps_lowlevelerr);
10383 asoc->ifp_had_enobuf = 0;
10390 * We don't want to mark the net->sent time here
10391 * since this we use this for HB and retrans cannot
10394 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10396 /* For auto-close */
10398 if (*now_filled == 0) {
10399 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10400 *now = asoc->time_last_sent;
10403 asoc->time_last_sent = *now;
10405 *cnt_out += bundle_at;
10406 #ifdef SCTP_AUDITING_ENABLED
10407 sctp_audit_log(0xC4, bundle_at);
10410 tsns_sent = data_list[0]->rec.data.tsn;
10412 for (i = 0; i < bundle_at; i++) {
10413 SCTP_STAT_INCR(sctps_sendretransdata);
10414 data_list[i]->sent = SCTP_DATAGRAM_SENT;
10416 * When we have a revoked data, and we
10417 * retransmit it, then we clear the revoked
10418 * flag since this flag dictates if we
10419 * subtracted from the fs
10421 if (data_list[i]->rec.data.chunk_was_revoked) {
10422 /* Deflate the cwnd */
10423 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10424 data_list[i]->rec.data.chunk_was_revoked = 0;
10426 data_list[i]->snd_count++;
10427 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10428 /* record the time */
10429 data_list[i]->sent_rcv_time = asoc->time_last_sent;
10430 if (data_list[i]->book_size_scale) {
10432 * need to double the book size on
10435 data_list[i]->book_size_scale = 0;
10436 /* Since we double the booksize, we must
10437 * also double the output queue size, since this
10438 * get shrunk when we free by this amount.
10440 atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10441 data_list[i]->book_size *= 2;
10445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10446 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10447 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10449 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10450 (uint32_t) (data_list[i]->send_size +
10451 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10453 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10454 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10455 data_list[i]->whoTo->flight_size,
10456 data_list[i]->book_size,
10457 (uint32_t)(uintptr_t)data_list[i]->whoTo,
10458 data_list[i]->rec.data.tsn);
10460 sctp_flight_size_increase(data_list[i]);
10461 sctp_total_flight_increase(stcb, data_list[i]);
10462 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10463 /* SWS sender side engages */
10464 asoc->peers_rwnd = 0;
10467 (data_list[i]->rec.data.doing_fast_retransmit)) {
10468 SCTP_STAT_INCR(sctps_sendfastretrans);
10469 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10470 (tmr_started == 0)) {
10472 * ok we just fast-retrans'd
10473 * the lowest TSN, i.e the
10474 * first on the list. In
10475 * this case we want to give
10476 * some more time to get a
10477 * SACK back without a
10480 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10481 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
10482 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10487 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10489 #ifdef SCTP_AUDITING_ENABLED
10490 sctp_auditing(21, inp, stcb, NULL);
10493 /* None will fit */
10496 if (asoc->sent_queue_retran_cnt <= 0) {
10497 /* all done we have no more to retran */
10498 asoc->sent_queue_retran_cnt = 0;
10502 /* No more room in rwnd */
10505 /* stop the for loop here. we sent out a packet */
/*
 * Ensure that a retransmission (SEND) timer is pending on at least one
 * destination of the association; if none is, start one on the alternate
 * destination (if set) or else on the primary destination.
 *
 * NOTE(review): this extract has elided lines (embedded numbering jumps,
 * e.g. 10514 -> 10516); the storage-class/return-type line and several
 * closing braces are not visible here — confirm against the full source.
 */
10512 sctp_timer_validation(struct sctp_inpcb *inp,
10513 struct sctp_tcb *stcb,
10514 struct sctp_association *asoc)
10516 struct sctp_nets *net;
10518 /* Validate that a timer is running somewhere */
10519 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10520 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
/* A pending timer was found; presumably the elided lines return here. */
10521 /* Here is a timer */
10525 SCTP_TCB_LOCK_ASSERT(stcb);
10526 /* Gak, we did not have a timer somewhere */
10527 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
/* Prefer the alternate destination when one has been selected. */
10528 if (asoc->alternate) {
10529 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10531 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
/*
 * Generic chunk service queue: drain retransmissions first (one packet per
 * pass), then repeatedly call the medium-level output routine to push
 * control and data chunks, subject to Nagle, max-burst and cwnd/rwnd limits.
 *
 * NOTE(review): this extract has elided lines (embedded numbering jumps);
 * several conditions, braces, and the `from_where`/`so_locked` parameter
 * lines are not visible here — confirm against the full source.
 */
10537 sctp_chunk_output(struct sctp_inpcb *inp,
10538 struct sctp_tcb *stcb,
10543 * Ok this is the generic chunk service queue. we must do the
10545 * - See if there are retransmits pending, if so we must
10547 * - Service the stream queue that is next, moving any
10548 * message (note I must get a complete message i.e.
10549 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
10551 * - Check to see if the cwnd/rwnd allows any output, if so we
10552 * go ahead and fomulate and send the low level chunks. Making sure
10553 * to combine any control in the control chunk queue also.
10555 struct sctp_association *asoc;
10556 struct sctp_nets *net;
10557 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10558 unsigned int burst_cnt = 0;
10559 struct timeval now;
10560 int now_filled = 0;
10562 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10565 unsigned int tot_frs = 0;
10567 #if defined(__APPLE__) && !defined(__Userspace__)
10569 sctp_lock_assert(SCTP_INP_SO(inp));
10571 sctp_unlock_assert(SCTP_INP_SO(inp));
10574 asoc = &stcb->asoc;
10576 /* The Nagle algorithm is only applied when handling a send call. */
10577 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10578 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10586 SCTP_TCB_LOCK_ASSERT(stcb);
/* un_sent: bytes queued but not yet in flight. */
10588 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10590 if ((un_sent <= 0) &&
10591 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10592 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10593 (asoc->sent_queue_retran_cnt == 0) &&
10594 (asoc->trigger_reset == 0)) {
10595 /* Nothing to do unless there is something to be sent left */
10598 /* Do we have something to send, data or control AND
10599 * a sack timer running, if so piggy-back the sack.
10601 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10602 sctp_send_sack(stcb, so_locked);
10603 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
10604 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
/* Retransmission phase: loop while chunks are marked for retransmit. */
10606 while (asoc->sent_queue_retran_cnt) {
10608 * Ok, it is retransmission time only, we send out only ONE
10609 * packet with a single call off to the retran code.
10611 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10613 * Special hook for handling cookiess discarded
10614 * by peer that carried data. Send cookie-ack only
10615 * and then the next call with get the retran's.
10617 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10619 &now, &now_filled, frag_point, so_locked);
10621 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10622 /* if its not from a HB then do it */
10624 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10630 * its from any other place, we don't allow retran
10631 * output (only control)
10636 /* Can't send anymore */
10638 * now lets push out control by calling med-level
10639 * output once. this assures that we WILL send HB's
10642 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10644 &now, &now_filled, frag_point, so_locked);
10645 #ifdef SCTP_AUDITING_ENABLED
10646 sctp_auditing(8, inp, stcb, NULL);
/* Make sure a SEND timer is pending somewhere before bailing out. */
10648 sctp_timer_validation(inp, stcb, asoc);
10653 * The count was off.. retran is not happening so do
10654 * the normal retransmission.
10656 #ifdef SCTP_AUDITING_ENABLED
10657 sctp_auditing(9, inp, stcb, NULL);
10659 if (ret == SCTP_RETRAN_EXIT) {
10664 if (from_where == SCTP_OUTPUT_FROM_T3) {
10665 /* Only one transmission allowed out of a timeout */
10666 #ifdef SCTP_AUDITING_ENABLED
10667 sctp_auditing(10, inp, stcb, NULL);
10669 /* Push out any control */
10670 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10671 &now, &now_filled, frag_point, so_locked);
10674 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10675 /* Hit FR burst limit */
10678 if ((num_out == 0) && (ret == 0)) {
10679 /* No more retrans to send */
10683 #ifdef SCTP_AUDITING_ENABLED
10684 sctp_auditing(12, inp, stcb, NULL);
10686 /* Check for bad destinations, if they exist move chunks around. */
10687 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10688 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10690 * if possible move things off of this address we
10691 * still may send below due to the dormant state but
10692 * we try to find an alternate address to send to
10693 * and if we have one we move all queued data on the
10694 * out wheel to this alternate address.
10696 if (net->ref_count > 1)
10697 sctp_move_chunks_from_net(stcb, net);
10700 * if ((asoc->sat_network) || (net->addr_is_local))
10701 * { burst_limit = asoc->max_burst *
10702 * SCTP_SAT_NETWORK_BURST_INCR; }
10704 if (asoc->max_burst > 0) {
10705 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10706 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10707 /* JRS - Use the congestion control given in the congestion control module */
10708 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10710 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10712 SCTP_STAT_INCR(sctps_maxburstqueued);
10714 net->fast_retran_ip = 0;
10716 if (net->flight_size == 0) {
10717 /* Should be decaying the cwnd here */
/* Main output loop: keep calling the medium-level output until it stops
 * producing packets or a burst/Nagle/empty-queue condition breaks out. */
10727 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10728 &reason_code, 0, from_where,
10729 &now, &now_filled, frag_point, so_locked);
10731 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10732 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10733 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10735 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10736 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10737 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10741 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10743 tot_out += num_out;
10745 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10746 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10747 if (num_out == 0) {
10748 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10753 * When the Nagle algorithm is used, look at how much
10754 * is unsent, then if its smaller than an MTU and we
10755 * have data in flight we stop, except if we are
10756 * handling a fragmented user message.
10758 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10759 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10760 (stcb->asoc.total_flight > 0)) {
10761 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10765 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10766 TAILQ_EMPTY(&asoc->send_queue) &&
10767 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10768 /* Nothing left to send */
10771 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10772 /* Nothing left to send */
/* Loop while packets were produced and the burst limit (if any, and if
 * not using cwnd-based max-burst) has not been reached. */
10775 } while (num_out &&
10776 ((asoc->max_burst == 0) ||
10777 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10778 (burst_cnt < asoc->max_burst)));
10780 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10781 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10782 SCTP_STAT_INCR(sctps_maxburstqueued);
10783 asoc->burst_limit_applied = 1;
10784 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10785 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10788 asoc->burst_limit_applied = 0;
10791 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10792 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10794 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10798 * Now we need to clean up the control chunk chain if a ECNE is on
10799 * it. It must be marked as UNSENT again so next call will continue
10800 * to send it until such time that we get a CWR, to remove it.
10802 if (stcb->asoc.ecn_echo_cnt_onq)
10803 sctp_fix_ecn_echo(asoc);
10805 if (stcb->asoc.trigger_reset) {
10806 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
/*
 * NOTE(review): the opening line(s) of this function are elided from this
 * extract (numbering jumps 10806 -> 10816). From the parameter list and the
 * tail call to sctp_sosend() this is presumably the sctp_output() send
 * wrapper: it validates the endpoint/socket (returning EINVAL via
 * SCTP_LTRACE_ERR_RET_PKT otherwise) and forwards the mbuf chain to
 * sctp_sosend() with no uio. Confirm against the full source.
 */
10816 struct sctp_inpcb *inp,
10818 struct sockaddr *addr,
10819 struct mbuf *control,
10820 #if defined(__FreeBSD__) && !defined(__Userspace__)
10822 #elif defined(_WIN32) && !defined(__Userspace__)
10825 #if defined(__APPLE__) && !defined(__Userspace__)
10826 struct proc *p SCTP_UNUSED,
10834 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10838 if (inp->sctp_socket == NULL) {
10839 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10842 return (sctp_sosend(inp->sctp_socket,
10844 (struct uio *)NULL,
10847 #if defined(__APPLE__) && !defined(__Userspace__)
/*
 * Build (or reuse) a FORWARD-TSN / I-FORWARD-TSN chunk on the control
 * queue, advertising asoc->advanced_peer_ack_point and listing the
 * stream/sequence pairs of skipped DATA chunks, trimmed to fit the MTU.
 *
 * NOTE(review): extract has elided lines (numbering jumps); some braces,
 * "else" lines, and loop bodies are not visible — confirm against the
 * full source before relying on exact control flow.
 */
10856 send_forward_tsn(struct sctp_tcb *stcb,
10857 struct sctp_association *asoc)
10859 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10860 struct sctp_forward_tsn_chunk *fwdtsn;
10861 struct sctp_strseq *strseq;
10862 struct sctp_strseq_mid *strseq_m;
10863 uint32_t advance_peer_ack_point;
10864 unsigned int cnt_of_space, i, ovh;
10865 unsigned int space_needed;
10866 unsigned int cnt_of_skipped = 0;
10868 SCTP_TCB_LOCK_ASSERT(stcb);
/* If a FWD-TSN chunk is already queued, recycle it instead of building anew. */
10869 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10870 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10871 /* mark it to unsent */
10872 chk->sent = SCTP_DATAGRAM_UNSENT;
10873 chk->snd_count = 0;
10874 /* Do we correct its output location? */
10876 sctp_free_remote_addr(chk->whoTo);
10879 goto sctp_fill_in_rest;
10882 /* Ok if we reach here we must build one */
10883 sctp_alloc_a_chunk(stcb, chk);
10887 asoc->fwd_tsn_cnt++;
10888 chk->copy_by_ref = 0;
10890 * We don't do the old thing here since
10891 * this is used not for on-wire but to
10892 * tell if we are sending a fwd-tsn by
10893 * the stack during output. And if its
10894 * a IFORWARD or a FORWARD it is a fwd-tsn.
10896 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10897 chk->rec.chunk_id.can_take_data = 0;
10901 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10902 if (chk->data == NULL) {
10903 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10906 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10907 chk->sent = SCTP_DATAGRAM_UNSENT;
10908 chk->snd_count = 0;
10909 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10910 asoc->ctrl_queue_cnt++;
10913 * Here we go through and fill out the part that deals with
10914 * stream/seq of the ones we skip.
10916 SCTP_BUF_LEN(chk->data) = 0;
/* Count how many sent-queue entries are being skipped over. */
10917 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10918 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10919 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10920 /* no more to look at */
10923 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10924 /* We don't report these */
/* Per-entry record size differs between I-DATA (strseq_mid) and DATA (strseq). */
10929 if (asoc->idata_supported) {
10930 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10931 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10933 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10934 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10936 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10938 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10939 ovh = SCTP_MIN_OVERHEAD;
10941 ovh = SCTP_MIN_V4_OVERHEAD;
10943 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10944 /* trim to a mtu size */
10945 cnt_of_space = asoc->smallest_mtu - ovh;
10947 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10948 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10949 0xff, 0, cnt_of_skipped,
10950 asoc->advanced_peer_ack_point);
10952 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10953 if (cnt_of_space < space_needed) {
10955 * ok we must trim down the chunk by lowering the
10956 * advance peer ack point.
10958 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10959 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10960 0xff, 0xff, cnt_of_space,
/* Recompute how many entries actually fit in the available space. */
10963 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10964 if (asoc->idata_supported) {
10965 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10967 cnt_of_skipped /= sizeof(struct sctp_strseq);
10970 * Go through and find the TSN that will be the one
10973 at = TAILQ_FIRST(&asoc->sent_queue);
10975 for (i = 0; i < cnt_of_skipped; i++) {
10976 tp1 = TAILQ_NEXT(at, sctp_next);
10983 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10984 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10985 0xff, cnt_of_skipped, at->rec.data.tsn,
10986 asoc->advanced_peer_ack_point);
10990 * last now points to last one I can report, update
10994 advance_peer_ack_point = last->rec.data.tsn;
10996 if (asoc->idata_supported) {
10997 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10998 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
11000 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
11001 cnt_of_skipped * sizeof(struct sctp_strseq);
11004 chk->send_size = space_needed;
11005 /* Setup the chunk */
11006 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
11007 fwdtsn->ch.chunk_length = htons(chk->send_size);
11008 fwdtsn->ch.chunk_flags = 0;
11009 if (asoc->idata_supported) {
11010 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
11012 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
11014 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
11015 SCTP_BUF_LEN(chk->data) = chk->send_size;
11018 * Move pointer to after the fwdtsn and transfer to the
11021 if (asoc->idata_supported) {
11022 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
11025 strseq = (struct sctp_strseq *)fwdtsn;
11029 * Now populate the strseq list. This is done blindly
11030 * without pulling out duplicate stream info. This is
11031 * inefficent but won't harm the process since the peer will
11032 * look at these in sequence and will thus release anything.
11033 * It could mean we exceed the PMTU and chop off some that
11034 * we could have included.. but this is unlikely (aka 1432/4
11035 * would mean 300+ stream seq's would have to be reported in
11036 * one FWD-TSN. With a bit of work we can later FIX this to
11037 * optimize and pull out duplicates.. but it does add more
11038 * overhead. So for now... not!
11041 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
11042 if (i >= cnt_of_skipped) {
11045 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
11046 /* We don't report these */
11049 if (at->rec.data.tsn == advance_peer_ack_point) {
11050 at->rec.data.fwd_tsn_cnt = 0;
11052 if (asoc->idata_supported) {
11053 strseq_m->sid = htons(at->rec.data.sid);
11054 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
11055 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
11057 strseq_m->flags = 0;
11059 strseq_m->mid = htonl(at->rec.data.mid);
11062 strseq->sid = htons(at->rec.data.sid);
11063 strseq->ssn = htons((uint16_t)at->rec.data.mid);
/*
 * Queue a SACK or NR-SACK on the control queue. Any SACK already queued is
 * removed/recycled first. The gap-ack blocks are generated by table lookup
 * (sack_array) over the mapping arrays, bounded by MTU/mbuf space; duplicate
 * TSNs are appended if room remains.
 *
 * NOTE(review): extract has elided lines (numbering jumps); several braces,
 * else-branches and increments (e.g. of num_gap_blocks, dup, i) are not
 * visible here — confirm against the full source.
 */
11072 sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
11075 * Queue up a SACK or NR-SACK in the control queue.
11076 * We must first check to see if a SACK or NR-SACK is
11077 * somehow on the control queue.
11078 * If so, we will take and and remove the old one.
11080 struct sctp_association *asoc;
11081 struct sctp_tmit_chunk *chk, *a_chk;
11082 struct sctp_sack_chunk *sack;
11083 struct sctp_nr_sack_chunk *nr_sack;
11084 struct sctp_gap_ack_block *gap_descriptor;
11085 const struct sack_track *selector;
11090 int limit_reached = 0;
11091 unsigned int i, siz, j;
11092 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
11095 uint32_t highest_tsn;
/* Prefer NR-SACK when the peer negotiated support for it. */
11100 if (stcb->asoc.nrsack_supported == 1) {
11101 type = SCTP_NR_SELECTIVE_ACK;
11103 type = SCTP_SELECTIVE_ACK;
11106 asoc = &stcb->asoc;
11107 SCTP_TCB_LOCK_ASSERT(stcb);
11108 if (asoc->last_data_chunk_from == NULL) {
11109 /* Hmm we never received anything */
11112 sctp_slide_mapping_arrays(stcb);
11113 sctp_set_rwnd(stcb, asoc);
11114 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11115 if (chk->rec.chunk_id.id == type) {
11116 /* Hmm, found a sack already on queue, remove it */
11117 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
11118 asoc->ctrl_queue_cnt--;
11121 sctp_m_freem(a_chk->data);
11122 a_chk->data = NULL;
11124 if (a_chk->whoTo) {
11125 sctp_free_remote_addr(a_chk->whoTo);
11126 a_chk->whoTo = NULL;
11131 if (a_chk == NULL) {
11132 sctp_alloc_a_chunk(stcb, a_chk);
11133 if (a_chk == NULL) {
11134 /* No memory so we drop the idea, and set a timer */
11135 if (stcb->asoc.delayed_ack) {
11136 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11137 stcb->sctp_ep, stcb, NULL,
11138 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
11139 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11140 stcb->sctp_ep, stcb, NULL);
11142 stcb->asoc.send_sack = 1;
11146 a_chk->copy_by_ref = 0;
11147 a_chk->rec.chunk_id.id = type;
11148 a_chk->rec.chunk_id.can_take_data = 1;
11150 /* Clear our pkt counts */
11151 asoc->data_pkts_seen = 0;
11154 a_chk->asoc = asoc;
11155 a_chk->snd_count = 0;
11156 a_chk->send_size = 0; /* fill in later */
11157 a_chk->sent = SCTP_DATAGRAM_UNSENT;
11158 a_chk->whoTo = NULL;
11160 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
11162 * Ok, the destination for the SACK is unreachable, lets see if
11163 * we can select an alternate to asoc->last_data_chunk_from
11165 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
11166 if (a_chk->whoTo == NULL) {
11167 /* Nope, no alternate */
11168 a_chk->whoTo = asoc->last_data_chunk_from;
11171 a_chk->whoTo = asoc->last_data_chunk_from;
11173 if (a_chk->whoTo) {
11174 atomic_add_int(&a_chk->whoTo->ref_count, 1);
11176 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11177 highest_tsn = asoc->highest_tsn_inside_map;
11179 highest_tsn = asoc->highest_tsn_inside_nr_map;
/* If no gaps exist, a minimal chunk suffices; otherwise use a cluster. */
11181 if (highest_tsn == asoc->cumulative_tsn) {
11183 if (type == SCTP_SELECTIVE_ACK) {
11184 space_req = sizeof(struct sctp_sack_chunk);
11186 space_req = sizeof(struct sctp_nr_sack_chunk);
11189 /* gaps get a cluster */
11190 space_req = MCLBYTES;
11192 /* Ok now lets formulate a MBUF with our sack */
11193 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11194 if ((a_chk->data == NULL) ||
11195 (a_chk->whoTo == NULL)) {
11196 /* rats, no mbuf memory */
11198 /* was a problem with the destination */
11199 sctp_m_freem(a_chk->data);
11200 a_chk->data = NULL;
11202 sctp_free_a_chunk(stcb, a_chk, so_locked);
11203 /* sa_ignore NO_NULL_CHK */
11204 if (stcb->asoc.delayed_ack) {
11205 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11206 stcb->sctp_ep, stcb, NULL,
11207 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
11208 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11209 stcb->sctp_ep, stcb, NULL);
11211 stcb->asoc.send_sack = 1;
11215 /* ok, lets go through and fill it in */
11216 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11217 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
11218 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11219 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11221 limit = mtod(a_chk->data, caddr_t);
11226 if ((asoc->sctp_cmt_on_off > 0) &&
11227 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
11229 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
11230 * received, then set high bit to 1, else 0. Reset
11233 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11234 asoc->cmt_dac_pkts_rcvd = 0;
11236 #ifdef SCTP_ASOCLOG_OF_TSNS
11237 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11238 stcb->asoc.cumack_log_atsnt++;
11239 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11240 stcb->asoc.cumack_log_atsnt = 0;
11243 /* reset the readers interpretation */
11244 stcb->freed_by_sorcv_sincelast = 0;
/* siz: number of mapping-array bytes to scan (handles TSN wraparound). */
11246 if (type == SCTP_SELECTIVE_ACK) {
11247 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11249 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11250 if (highest_tsn > asoc->mapping_array_base_tsn) {
11251 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11253 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
11257 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11258 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11259 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11260 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11262 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11266 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11269 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11271 if (((type == SCTP_SELECTIVE_ACK) &&
11272 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11273 ((type == SCTP_NR_SELECTIVE_ACK) &&
11274 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11275 /* we have a gap .. maybe */
11276 for (i = 0; i < siz; i++) {
11277 tsn_map = asoc->mapping_array[i];
/* For a plain SACK both renegable and non-renegable TSNs are reported. */
11278 if (type == SCTP_SELECTIVE_ACK) {
11279 tsn_map |= asoc->nr_mapping_array[i];
11283 * Clear all bits corresponding to TSNs
11284 * smaller or equal to the cumulative TSN.
11286 tsn_map &= (~0U << (1 - offset));
/* Table-driven gap extraction: tsn_map byte indexes precomputed runs. */
11288 selector = &sack_array[tsn_map];
11289 if (mergeable && selector->right_edge) {
11291 * Backup, left and right edges were ok to
11297 if (selector->num_entries == 0)
11300 for (j = 0; j < selector->num_entries; j++) {
11301 if (mergeable && selector->right_edge) {
11303 * do a merge by NOT setting
11309 * no merge, set the left
11313 gap_descriptor->start = htons((selector->gaps[j].start + offset));
11315 gap_descriptor->end = htons((selector->gaps[j].end + offset));
11318 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11324 if (selector->left_edge) {
11328 if (limit_reached) {
11329 /* Reached the limit stop */
/* Second pass (NR-SACK only): encode the non-renegable gap blocks. */
11335 if ((type == SCTP_NR_SELECTIVE_ACK) &&
11336 (limit_reached == 0)) {
11340 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11341 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11343 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11346 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11349 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11351 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11352 /* we have a gap .. maybe */
11353 for (i = 0; i < siz; i++) {
11354 tsn_map = asoc->nr_mapping_array[i];
11357 * Clear all bits corresponding to TSNs
11358 * smaller or equal to the cumulative TSN.
11360 tsn_map &= (~0U << (1 - offset));
11362 selector = &sack_array[tsn_map];
11363 if (mergeable && selector->right_edge) {
11365 * Backup, left and right edges were ok to
11368 num_nr_gap_blocks--;
11371 if (selector->num_entries == 0)
11374 for (j = 0; j < selector->num_entries; j++) {
11375 if (mergeable && selector->right_edge) {
11377 * do a merge by NOT setting
11383 * no merge, set the left
11387 gap_descriptor->start = htons((selector->gaps[j].start + offset));
11389 gap_descriptor->end = htons((selector->gaps[j].end + offset));
11390 num_nr_gap_blocks++;
11392 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11398 if (selector->left_edge) {
11402 if (limit_reached) {
11403 /* Reached the limit stop */
11410 /* now we must add any dups we are going to report. */
11411 if ((limit_reached == 0) && (asoc->numduptsns)) {
11412 dup = (uint32_t *) gap_descriptor;
11413 for (i = 0; i < asoc->numduptsns; i++) {
11414 *dup = htonl(asoc->dup_tsns[i]);
11417 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11422 asoc->numduptsns = 0;
11425 * now that the chunk is prepared queue it to the control chunk
11428 if (type == SCTP_SELECTIVE_ACK) {
11429 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
11430 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11431 num_dups * sizeof(int32_t));
11432 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11433 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11434 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11435 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11436 sack->sack.num_dup_tsns = htons(num_dups);
11437 sack->ch.chunk_type = type;
11438 sack->ch.chunk_flags = flags;
11439 sack->ch.chunk_length = htons(a_chk->send_size);
11441 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
11442 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11443 num_dups * sizeof(int32_t));
11444 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11445 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11446 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11447 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11448 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11449 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11450 nr_sack->nr_sack.reserved = 0;
11451 nr_sack->ch.chunk_type = type;
11452 nr_sack->ch.chunk_flags = flags;
11453 nr_sack->ch.chunk_length = htons(a_chk->send_size);
11455 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11456 asoc->my_last_reported_rwnd = asoc->my_rwnd;
11457 asoc->ctrl_queue_cnt++;
11458 asoc->send_sack = 0;
11459 SCTP_STAT_INCR(sctps_sendsacks);
/*
 * Build and immediately transmit an ABORT chunk for the association,
 * optionally prefixed with an AUTH chunk and carrying the error causes in
 * the mbuf chain 'operr' (ownership taken; freed on failure paths).
 *
 * NOTE(review): extract has elided lines (numbering jumps); some returns,
 * braces and the tail of the lowlevel-output call are not visible here —
 * confirm against the full source.
 */
11464 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
11466 struct mbuf *m_abort, *m, *m_last;
11467 struct mbuf *m_out, *m_end = NULL;
11468 struct sctp_abort_chunk *abort;
11469 struct sctp_auth_chunk *auth = NULL;
11470 struct sctp_nets *net;
11472 uint32_t auth_offset = 0;
11474 uint16_t cause_len, chunk_len, padding_len;
11476 #if defined(__APPLE__) && !defined(__Userspace__)
11478 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11480 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11483 SCTP_TCB_LOCK_ASSERT(stcb);
11485 * Add an AUTH chunk, if chunk requires it and save the offset into
11486 * the chain for AUTH
11488 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11489 stcb->asoc.peer_auth_chunks)) {
11490 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11491 stcb, SCTP_ABORT_ASSOCIATION);
11492 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11496 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11497 if (m_abort == NULL) {
/* Allocation failed: release the AUTH chain and the caller's causes. */
11499 sctp_m_freem(m_out);
11502 sctp_m_freem(operr);
11506 /* link in any error */
11507 SCTP_BUF_NEXT(m_abort) = operr;
/* Sum the cause lengths; m_last ends at the final mbuf (used for padding). */
11510 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11511 cause_len += (uint16_t)SCTP_BUF_LEN(m);
11512 if (SCTP_BUF_NEXT(m) == NULL) {
11516 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11517 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
11518 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
11519 if (m_out == NULL) {
11520 /* NO Auth chunk prepended, so reserve space in front */
11521 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11524 /* Put AUTH chunk at the front of the chain */
11525 SCTP_BUF_NEXT(m_end) = m_abort;
11527 if (stcb->asoc.alternate) {
11528 net = stcb->asoc.alternate;
11530 net = stcb->asoc.primary_destination;
11532 /* Fill in the ABORT chunk header. */
11533 abort = mtod(m_abort, struct sctp_abort_chunk *);
11534 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11535 if (stcb->asoc.peer_vtag == 0) {
11536 /* This happens iff the assoc is in COOKIE-WAIT state. */
11537 vtag = stcb->asoc.my_vtag;
11538 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11540 vtag = stcb->asoc.peer_vtag;
11541 abort->ch.chunk_flags = 0;
11543 abort->ch.chunk_length = htons(chunk_len);
11544 /* Add padding, if necessary. */
11545 if (padding_len > 0) {
11546 if ((m_last == NULL) ||
11547 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11548 sctp_m_freem(m_out);
11552 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11553 (struct sockaddr *)&net->ro._l_addr,
11554 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11555 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11556 stcb->asoc.primary_destination->port, NULL,
11557 #if defined(__FreeBSD__) && !defined(__Userspace__)
11561 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
/* ENOBUFS is remembered so the stack can back off; other errors only log. */
11562 if (error == ENOBUFS) {
11563 stcb->asoc.ifp_had_enobuf = 1;
11564 SCTP_STAT_INCR(sctps_lowlevelerr);
11567 stcb->asoc.ifp_had_enobuf = 0;
11569 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
/*
 * Build and immediately transmit a SHUTDOWN-COMPLETE chunk to 'net'.
 * When reflect_vtag is set, the chunk carries the T-bit (SCTP_HAD_NO_TCB)
 * and our own verification tag; otherwise the peer's tag is used.
 *
 * NOTE(review): extract has elided lines (numbering jumps); some parameter
 * and return lines are not visible here — confirm against the full source.
 */
11573 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11574 struct sctp_nets *net,
11577 /* formulate and SEND a SHUTDOWN-COMPLETE */
11578 struct mbuf *m_shutdown_comp;
11579 struct sctp_shutdown_complete_chunk *shutdown_complete;
11584 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11585 if (m_shutdown_comp == NULL) {
11589 if (reflect_vtag) {
11590 flags = SCTP_HAD_NO_TCB;
11591 vtag = stcb->asoc.my_vtag;
11594 vtag = stcb->asoc.peer_vtag;
11596 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11597 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11598 shutdown_complete->ch.chunk_flags = flags;
11599 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11600 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11601 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11602 (struct sockaddr *)&net->ro._l_addr,
11603 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11604 stcb->sctp_ep->sctp_lport, stcb->rport,
11607 #if defined(__FreeBSD__) && !defined(__Userspace__)
11610 SCTP_SO_NOT_LOCKED))) {
11611 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
/* ENOBUFS is remembered so the stack can back off; other errors only log. */
11612 if (error == ENOBUFS) {
11613 stcb->asoc.ifp_had_enobuf = 1;
11614 SCTP_STAT_INCR(sctps_lowlevelerr);
11617 stcb->asoc.ifp_had_enobuf = 0;
11619 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
/*
 * sctp_send_resp_msg: construct and transmit a stand-alone response packet
 * carrying a single chunk of 'type' (e.g. ABORT or SHUTDOWN-COMPLETE) back
 * to the originator of a received packet.  'src'/'dst'/'sh' describe the
 * RECEIVED packet, so addresses and ports are swapped when the reply is
 * built.  'cause' (may be NULL) becomes the chunk payload and is consumed
 * on every path.  vtag == 0 means "reflect sh->v_tag and set the T bit".
 * port != 0 selects UDP encapsulation of the reply.
 * The two signatures below are the FreeBSD-kernel variant (flow id/FIB
 * aware) and the variant for all other platforms.
 * NOTE(review): embedded line numbers are non-contiguous — #else/#endif
 * lines, braces, returns and some statements are elided in this listing.
 */
11623 #if defined(__FreeBSD__) && !defined(__Userspace__)
11625 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11626 struct sctphdr *sh, uint32_t vtag,
11627 uint8_t type, struct mbuf *cause,
11628 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11629 uint32_t vrf_id, uint16_t port)
11632 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11633 struct sctphdr *sh, uint32_t vtag,
11634 uint8_t type, struct mbuf *cause,
11635 uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11638 struct mbuf *o_pak;
11640 struct sctphdr *shout;
11641 struct sctp_chunkhdr *ch;
11642 #if defined(INET) || defined(INET6)
11643 struct udphdr *udp;
11645 int ret, len, cause_len, padding_len;
11647 #if defined(__APPLE__) && !defined(__Userspace__)
11650 struct sockaddr_in *src_sin, *dst_sin;
11654 struct sockaddr_in6 *src_sin6, *dst_sin6;
11655 struct ip6_hdr *ip6;
11658 /* Compute the length of the cause and add final padding. */
11660 if (cause != NULL) {
11661 struct mbuf *m_at, *m_last = NULL;
/* Walk the cause chain: remember the last mbuf and total its length. */
11663 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11664 if (SCTP_BUF_NEXT(m_at) == NULL)
11666 cause_len += SCTP_BUF_LEN(m_at);
/* Pad the cause out to a 4-byte boundary, appended to the last mbuf. */
11668 padding_len = cause_len % 4;
11669 if (padding_len != 0) {
11670 padding_len = 4 - padding_len;
11672 if (padding_len != 0) {
/* On pad-allocation failure the cause chain is freed (elided return). */
11673 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11674 sctp_m_freem(cause);
11681 /* Get an mbuf for the header. */
11682 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
/* Add the network-layer header size for the reply's address family. */
11683 switch (dst->sa_family) {
11686 len += sizeof(struct ip);
11691 len += sizeof(struct ip6_hdr);
11697 #if defined(INET) || defined(INET6)
11699 len += sizeof(struct udphdr);
/* Allocate the header mbuf with room for the link-layer header; the
 * reserve constant differs per platform (max_linkhdr vs SCTP_MAX_LINKHDR). */
11702 #if defined(__APPLE__) && !defined(__Userspace__)
11703 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11704 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11706 mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11709 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11711 if (mout == NULL) {
/* Allocation failed: the cause must still be consumed. */
11713 sctp_m_freem(cause);
11717 #if defined(__APPLE__) && !defined(__Userspace__)
11718 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11719 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11721 SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11724 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11726 SCTP_BUF_LEN(mout) = len;
/* Chain the (padded) cause behind the header mbuf. */
11727 SCTP_BUF_NEXT(mout) = cause;
11728 #if defined(__FreeBSD__) && !defined(__Userspace__)
/* Keep the reply in the same FIB/flow as the packet that triggered it. */
11729 M_SETFIB(mout, fibnum);
11730 mout->m_pkthdr.flowid = mflowid;
11731 M_HASHTYPE_SET(mout, mflowtype);
/* Build the network-layer header.  Source and destination are swapped:
 * the reply's source is the received packet's destination and vice versa. */
11739 switch (dst->sa_family) {
11742 src_sin = (struct sockaddr_in *)src;
11743 dst_sin = (struct sockaddr_in *)dst;
11744 ip = mtod(mout, struct ip *);
11745 ip->ip_v = IPVERSION;
11746 ip->ip_hl = (sizeof(struct ip) >> 2);
11748 #if defined(__FreeBSD__) && !defined(__Userspace__)
11749 ip->ip_off = htons(IP_DF);
11750 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
11751 ip->ip_off = IP_DF;
11753 ip->ip_off = htons(IP_DF);
/* Per-platform IP id generation (sequential counter or randomized). */
11755 #if defined(__Userspace__)
11756 ip->ip_id = htons(ip_id++);
11757 #elif defined(__FreeBSD__)
11759 #elif defined(__APPLE__)
11761 ip->ip_id = ip_randomid();
11763 ip->ip_id = htons(ip_id++);
11766 ip->ip_id = ip_id++;
11768 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
/* Protocol depends on whether the reply is UDP-encapsulated. */
11770 ip->ip_p = IPPROTO_UDP;
11772 ip->ip_p = IPPROTO_SCTP;
11774 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11775 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11777 len = sizeof(struct ip);
11778 shout = (struct sctphdr *)((caddr_t)ip + len);
11783 src_sin6 = (struct sockaddr_in6 *)src;
11784 dst_sin6 = (struct sockaddr_in6 *)dst;
11785 ip6 = mtod(mout, struct ip6_hdr *);
11786 ip6->ip6_flow = htonl(0x60000000);
11787 #if defined(__FreeBSD__) && !defined(__Userspace__)
11788 if (V_ip6_auto_flowlabel) {
11789 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11792 #if defined(__Userspace__)
11793 ip6->ip6_hlim = IPv6_HOP_LIMIT;
11795 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11798 ip6->ip6_nxt = IPPROTO_UDP;
11800 ip6->ip6_nxt = IPPROTO_SCTP;
11802 ip6->ip6_src = dst_sin6->sin6_addr;
11803 ip6->ip6_dst = src_sin6->sin6_addr;
11804 len = sizeof(struct ip6_hdr);
11805 shout = (struct sctphdr *)((caddr_t)ip6 + len);
/* Default (e.g. AF_CONN): no network header, SCTP header at the front. */
11810 shout = mtod(mout, struct sctphdr *);
11813 #if defined(INET) || defined(INET6)
/* Optional UDP encapsulation: refuse if the tunneling port is disabled. */
11815 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11816 sctp_m_freem(mout);
11819 udp = (struct udphdr *)shout;
11820 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11821 udp->uh_dport = port;
11823 udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11824 sizeof(struct sctphdr) +
11825 sizeof(struct sctp_chunkhdr) +
11826 cause_len + padding_len));
11827 len += sizeof(struct udphdr);
11828 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
/* Common SCTP header: ports swapped relative to the received header. */
11833 shout->src_port = sh->dest_port;
11834 shout->dest_port = sh->src_port;
11835 shout->checksum = 0;
/* vtag == 0 (elided branch) means reflect the received v_tag. */
11837 shout->v_tag = htonl(vtag);
11839 shout->v_tag = sh->v_tag;
11841 len += sizeof(struct sctphdr);
/* Single chunk of the requested type; T bit set on the reflect path. */
11842 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11843 ch->chunk_type = type;
11845 ch->chunk_flags = 0;
11847 ch->chunk_flags = SCTP_HAD_NO_TCB;
11849 ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11850 len += sizeof(struct sctp_chunkhdr);
11851 len += cause_len + padding_len;
11853 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11854 sctp_m_freem(mout);
11857 SCTP_ATTACH_CHAIN(o_pak, mout, len);
/* Per-family output: finish length fields, checksums, then send. */
11858 switch (dst->sa_family) {
11861 #if defined(__APPLE__) && !defined(__Userspace__)
11862 /* zap the stack pointer to the route */
11863 memset(&ro, 0, sizeof(sctp_route_t));
11866 #if !defined(_WIN32) && !defined(__Userspace__)
11867 #if defined(__FreeBSD__)
11869 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11874 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11880 #if defined(__FreeBSD__) && !defined(__Userspace__)
11881 ip->ip_len = htons(len);
11882 #elif defined(__APPLE__) || defined(__Userspace__)
11885 ip->ip_len = htons(len);
/* UDP-encapsulated path: software CRC32c over the SCTP part only. */
11888 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11889 SCTP_STAT_INCR(sctps_sendswcrc);
11890 #if !defined(_WIN32) && !defined(__Userspace__)
11891 #if defined(__FreeBSD__)
11893 SCTP_ENABLE_UDP_CSUM(o_pak);
11896 SCTP_ENABLE_UDP_CSUM(o_pak);
/* Non-encapsulated path: offload CRC32c to hardware where available. */
11900 #if defined(__FreeBSD__) && !defined(__Userspace__)
11901 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11902 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11903 SCTP_STAT_INCR(sctps_sendhwcrc);
11905 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11906 SCTP_STAT_INCR(sctps_sendswcrc);
11909 #ifdef SCTP_PACKET_LOGGING
11910 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11911 sctp_packet_log(o_pak);
11914 #if defined(__APPLE__) && !defined(__Userspace__)
11915 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11916 /* Free the route if we got one back */
11922 #if defined(__FreeBSD__) && !defined(__Userspace__)
11923 SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11925 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
/* IPv6: same structure — payload length, checksums, log, then output. */
11931 ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11933 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11934 SCTP_STAT_INCR(sctps_sendswcrc);
11935 #if !defined(__Userspace__)
11936 #if defined(_WIN32)
/* UDP over IPv6: a computed checksum of 0 must be sent as 0xffff. */
11939 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11940 udp->uh_sum = 0xffff;
11945 #if defined(__FreeBSD__) && !defined(__Userspace__)
11946 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11947 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11948 SCTP_STAT_INCR(sctps_sendhwcrc);
11950 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
11951 SCTP_STAT_INCR(sctps_sendswcrc);
11954 #ifdef SCTP_PACKET_LOGGING
11955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11956 sctp_packet_log(o_pak);
11959 #if defined(__FreeBSD__) && !defined(__Userspace__)
11960 SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11962 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
/* Userspace AF_CONN: checksum (unless offloaded), copy to a flat buffer
 * and hand it to the registered conn_output callback. */
11965 #if defined(__Userspace__)
11969 struct sockaddr_conn *sconn;
11971 sconn = (struct sockaddr_conn *)src;
11972 if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
11973 shout->checksum = sctp_calculate_cksum(mout, 0);
11974 SCTP_STAT_INCR(sctps_sendswcrc);
11976 SCTP_STAT_INCR(sctps_sendhwcrc);
11978 #ifdef SCTP_PACKET_LOGGING
11979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11980 sctp_packet_log(mout);
11983 /* Don't alloc/free for each packet */
11984 if ((buffer = malloc(len)) != NULL) {
11985 m_copydata(mout, 0, len, buffer);
11986 ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
11991 sctp_m_freem(mout);
/* Unknown family: should not happen (TSNH); free and report EFAULT. */
11996 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11998 sctp_m_freem(mout);
11999 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12002 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
12003 #if defined(__FreeBSD__) && !defined(__Userspace__)
12005 UDPSTAT_INC(udps_opackets);
/* Bookkeeping: success bumps packet/chunk counters, failure the error one. */
12008 SCTP_STAT_INCR(sctps_sendpackets);
12009 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12010 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12012 SCTP_STAT_INCR(sctps_senderrors);
/*
 * sctp_send_shutdown_complete2: send an "out of the blue" SHUTDOWN-COMPLETE
 * when no TCB exists for the received SHUTDOWN-ACK.  Thin wrapper around
 * sctp_send_resp_msg(); passing vtag 0 makes it reflect sh->v_tag and set
 * the T bit.  NOTE(review): embedded line numbers are non-contiguous —
 * the non-FreeBSD argument list and the closing lines are elided here.
 */
12018 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
12019 struct sctphdr *sh,
12020 #if defined(__FreeBSD__) && !defined(__Userspace__)
12021 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12023 uint32_t vrf_id, uint16_t port)
12025 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
12026 #if defined(__FreeBSD__) && !defined(__Userspace__)
12027 mflowtype, mflowid, fibnum,
/*
 * sctp_send_hb: build a HEARTBEAT-REQUEST chunk for destination 'net' and
 * queue it on the association's control_send_queue (it is not transmitted
 * here).  The heartbeat info parameter carries a send timestamp (for RTT
 * measurement on the response) plus the destination address, and random
 * values drawn from the entropy pool only for UNCONFIRMED addresses.
 * NOTE(review): embedded line numbers are non-contiguous — else/default
 * branches, braces, returns and #endifs are elided in this listing.
 */
12033 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked)
12035 struct sctp_tmit_chunk *chk;
12036 struct sctp_heartbeat_chunk *hb;
12037 struct timeval now;
12039 SCTP_TCB_LOCK_ASSERT(stcb);
/* Timestamp now so the echoed HEARTBEAT-ACK can be used for RTT. */
12043 (void)SCTP_GETTIME_TIMEVAL(&now);
12044 switch (net->ro._l_addr.sa.sa_family) {
12053 #if defined(__Userspace__)
/* Allocate the control-chunk bookkeeping structure; bail on failure. */
12060 sctp_alloc_a_chunk(stcb, chk);
12062 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
12066 chk->copy_by_ref = 0;
12067 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
12068 chk->rec.chunk_id.can_take_data = 1;
12070 chk->asoc = &stcb->asoc;
12071 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
12073 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12074 if (chk->data == NULL) {
12075 sctp_free_a_chunk(stcb, chk, so_locked);
12078 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12079 SCTP_BUF_LEN(chk->data) = chk->send_size;
12080 chk->sent = SCTP_DATAGRAM_UNSENT;
12081 chk->snd_count = 0;
/* The chunk holds a reference on its destination net. */
12083 atomic_add_int(&chk->whoTo->ref_count, 1);
12084 /* Now we have a mbuf that we can fill in with the details */
12085 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
12086 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
12087 /* fill out chunk header */
12088 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
12089 hb->ch.chunk_flags = 0;
12090 hb->ch.chunk_length = htons(chk->send_size);
12091 /* Fill out hb parameter */
12092 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
12093 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
12094 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
12095 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
12096 /* Did our user request this one, put it in */
12097 hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
/* Platforms with sa_len use it directly; others derive the length below. */
12099 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
12101 switch (net->ro._l_addr.sa.sa_family) {
12104 hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
12109 hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
12112 #if defined(__Userspace__)
12114 hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
12118 hb->heartbeat.hb_info.addr_len = 0;
12122 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
12124 * we only take from the entropy pool if the address is not
12127 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12128 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12130 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
12131 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
/* Copy the destination address into the heartbeat info parameter. */
12133 switch (net->ro._l_addr.sa.sa_family) {
12136 memcpy(hb->heartbeat.hb_info.address,
12137 &net->ro._l_addr.sin.sin_addr,
12138 sizeof(net->ro._l_addr.sin.sin_addr));
12143 memcpy(hb->heartbeat.hb_info.address,
12144 &net->ro._l_addr.sin6.sin6_addr,
12145 sizeof(net->ro._l_addr.sin6.sin6_addr));
12148 #if defined(__Userspace__)
12150 memcpy(hb->heartbeat.hb_info.address,
12151 &net->ro._l_addr.sconn.sconn_addr,
12152 sizeof(net->ro._l_addr.sconn.sconn_addr));
/* (Elided default case) unsupported family: undo the allocation. */
12157 sctp_m_freem(chk->data);
12160 sctp_free_a_chunk(stcb, chk, so_locked);
/* Queue for transmission and mark that we await a HEARTBEAT-ACK. */
12164 net->hb_responded = 0;
12165 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12166 stcb->asoc.ctrl_queue_cnt++;
12167 SCTP_STAT_INCR(sctps_sendheartbeat);
/*
 * sctp_send_ecn_echo: queue (or update) an ECN-ECHO chunk for 'net'
 * reporting congestion-experienced up to 'high_tsn'.  If an ECN-ECHO for
 * the same destination is already on the control queue it is updated in
 * place (TSN advanced monotonically, packet count bumped) instead of
 * queueing a duplicate.  NOTE(review): embedded line numbers are
 * non-contiguous — returns, braces and a few statements are elided.
 */
12172 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
12175 struct sctp_association *asoc;
12176 struct sctp_ecne_chunk *ecne;
12177 struct sctp_tmit_chunk *chk;
12182 asoc = &stcb->asoc;
12183 SCTP_TCB_LOCK_ASSERT(stcb);
12184 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12185 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
12186 /* found a previous ECN_ECHO update it if needed */
12187 uint32_t cnt, ctsn;
12188 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12189 ctsn = ntohl(ecne->tsn);
/* Only advance the reported TSN; never move it backwards. */
12190 if (SCTP_TSN_GT(high_tsn, ctsn)) {
12191 ecne->tsn = htonl(high_tsn);
12192 SCTP_STAT_INCR(sctps_queue_upd_ecne);
/* Count another CE-marked packet since the last CWR. */
12194 cnt = ntohl(ecne->num_pkts_since_cwr);
12196 ecne->num_pkts_since_cwr = htonl(cnt);
12200 /* nope could not find one to update so we must build one */
12201 sctp_alloc_a_chunk(stcb, chk);
12205 SCTP_STAT_INCR(sctps_queue_upd_ecne);
12206 chk->copy_by_ref = 0;
12207 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12208 chk->rec.chunk_id.can_take_data = 0;
12210 chk->asoc = &stcb->asoc;
12211 chk->send_size = sizeof(struct sctp_ecne_chunk);
12212 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12213 if (chk->data == NULL) {
12214 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12217 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12218 SCTP_BUF_LEN(chk->data) = chk->send_size;
12219 chk->sent = SCTP_DATAGRAM_UNSENT;
12220 chk->snd_count = 0;
/* The chunk holds a reference on its destination net. */
12222 atomic_add_int(&chk->whoTo->ref_count, 1);
12224 stcb->asoc.ecn_echo_cnt_onq++;
12225 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12226 ecne->ch.chunk_type = SCTP_ECN_ECHO;
12227 ecne->ch.chunk_flags = 0;
12228 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12229 ecne->tsn = htonl(high_tsn);
12230 ecne->num_pkts_since_cwr = htonl(1);
/* Head insertion: congestion notification goes out ahead of other control. */
12231 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12232 asoc->ctrl_queue_cnt++;
/*
 * sctp_send_packet_dropped: queue a PACKET-DROPPED chunk echoing (part of)
 * a received packet 'm' of 'len' bytes back to the peer.  Only sent when
 * the peer advertised pktdrop support.  Never responds to packets that
 * contain ABORT, PACKET-DROPPED or INIT-ACK chunks.  The echoed payload is
 * truncated to one MTU if necessary (SCTP_PACKET_TRUNCATED flag), and
 * SCTP_BADCRC is set when 'bad_crc' says the packet failed its checksum.
 * The chunk also advertises our receive buffer limit and current queue
 * occupancy (bottle_bw / current_onq).  NOTE(review): embedded line
 * numbers are non-contiguous — returns, braces, some locals (offset,
 * fullsz, spc, datap) and else-lines are elided in this listing.
 */
12236 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
12237 struct mbuf *m, int len, int iphlen, int bad_crc)
12239 struct sctp_association *asoc;
12240 struct sctp_pktdrop_chunk *drp;
12241 struct sctp_tmit_chunk *chk;
12247 struct sctp_chunkhdr *ch, chunk_buf;
12248 unsigned int chk_length;
12253 asoc = &stcb->asoc;
12254 SCTP_TCB_LOCK_ASSERT(stcb);
12255 if (asoc->pktdrop_supported == 0) {
12257 * peer must declare support before I send one.
12261 if (stcb->sctp_socket == NULL) {
12264 sctp_alloc_a_chunk(stcb, chk);
12268 chk->copy_by_ref = 0;
12269 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12270 chk->rec.chunk_id.can_take_data = 1;
12273 chk->send_size = len;
12274 /* Validate that we do not have an ABORT in here. */
12275 offset = iphlen + sizeof(struct sctphdr);
12276 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12277 sizeof(*ch), (uint8_t *) & chunk_buf);
/* Walk every chunk in the received packet looking for forbidden types. */
12278 while (ch != NULL) {
12279 chk_length = ntohs(ch->chunk_length);
12280 if (chk_length < sizeof(*ch)) {
12281 /* break to abort land */
12284 switch (ch->chunk_type) {
12285 case SCTP_PACKET_DROPPED:
12286 case SCTP_ABORT_ASSOCIATION:
12287 case SCTP_INITIATION_ACK:
12289 * We don't respond with an PKT-DROP to an ABORT
12290 * or PKT-DROP. We also do not respond to an
12291 * INIT-ACK, because we can't know if the initiation
12292 * tag is correct or not.
12294 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12299 offset += SCTP_SIZE32(chk_length);
12300 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12301 sizeof(*ch), (uint8_t *) & chunk_buf);
/* Clamp the echoed data so the result fits in one MTU (and one cluster). */
12304 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12305 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
12306 /* only send 1 mtu worth, trim off the
12307 * excess on the end.
12310 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12313 chk->asoc = &stcb->asoc;
12314 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12315 if (chk->data == NULL) {
12317 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12320 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12321 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12323 sctp_m_freem(chk->data);
12327 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12328 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12329 chk->book_size_scale = 0;
/* Truncated case: record the original size ('fullsz', elided local). */
12331 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12332 drp->trunc_len = htons(fullsz);
12333 /* Len is already adjusted to size minus overhead above
12334 * take out the pkt_drop chunk itself from it.
12336 chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
12337 len = chk->send_size;
12339 /* no truncation needed */
12340 drp->ch.chunk_flags = 0;
12341 drp->trunc_len = htons(0);
/* Flag a checksum failure on the original packet if the caller saw one. */
12344 drp->ch.chunk_flags |= SCTP_BADCRC;
12346 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12347 SCTP_BUF_LEN(chk->data) = chk->send_size;
12348 chk->sent = SCTP_DATAGRAM_UNSENT;
12349 chk->snd_count = 0;
12351 /* we should hit here */
12353 atomic_add_int(&chk->whoTo->ref_count, 1);
12357 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12358 drp->ch.chunk_length = htons(chk->send_size);
/* Advertise our receive-buffer limit and current occupancy to the peer. */
12359 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12363 drp->bottle_bw = htonl(spc);
12364 if (asoc->my_rwnd) {
12365 drp->current_onq = htonl(asoc->size_on_reasm_queue +
12366 asoc->size_on_all_streams +
12367 asoc->my_rwnd_control_len +
12368 stcb->sctp_socket->so_rcv.sb_cc);
12371 * If my rwnd is 0, possibly from mbuf depletion as well as
12372 * space used, tell the peer there is NO space aka onq == bw
12374 drp->current_onq = htonl(spc);
/* Copy the echoed portion of the dropped packet and queue the chunk. */
12378 m_copydata(m, iphlen, len, (caddr_t)datap);
12379 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12380 asoc->ctrl_queue_cnt++;
/*
 * sctp_send_cwr: queue (or update) a CWR chunk toward 'net' acknowledging
 * ECN-ECHO reports up to 'high_tsn'.  'override' carries
 * SCTP_CWR_REDUCE_OVERRIDE into the chunk flags.  Like ECN-ECHO, an
 * existing queued CWR for the same destination is updated in place rather
 * than duplicated.  NOTE(review): embedded line numbers are
 * non-contiguous — returns, braces and the queued-update return are elided.
 */
12384 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12386 struct sctp_association *asoc;
12387 struct sctp_cwr_chunk *cwr;
12388 struct sctp_tmit_chunk *chk;
12390 SCTP_TCB_LOCK_ASSERT(stcb);
12394 asoc = &stcb->asoc;
12395 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12396 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12397 /* found a previous CWR queued to same destination update it if needed */
12399 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12400 ctsn = ntohl(cwr->tsn);
/* Only advance the acknowledged TSN; never move it backwards. */
12401 if (SCTP_TSN_GT(high_tsn, ctsn)) {
12402 cwr->tsn = htonl(high_tsn);
12404 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12405 /* Make sure override is carried */
12406 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
/* No queued CWR for this destination: build a fresh one. */
12411 sctp_alloc_a_chunk(stcb, chk);
12415 chk->copy_by_ref = 0;
12416 chk->rec.chunk_id.id = SCTP_ECN_CWR;
12417 chk->rec.chunk_id.can_take_data = 1;
12419 chk->asoc = &stcb->asoc;
12420 chk->send_size = sizeof(struct sctp_cwr_chunk);
12421 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12422 if (chk->data == NULL) {
12423 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12426 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12427 SCTP_BUF_LEN(chk->data) = chk->send_size;
12428 chk->sent = SCTP_DATAGRAM_UNSENT;
12429 chk->snd_count = 0;
/* The chunk holds a reference on its destination net. */
12431 atomic_add_int(&chk->whoTo->ref_count, 1);
12432 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12433 cwr->ch.chunk_type = SCTP_ECN_CWR;
12434 cwr->ch.chunk_flags = override;
12435 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12436 cwr->tsn = htonl(high_tsn);
12437 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12438 asoc->ctrl_queue_cnt++;
/*
 * sctp_add_stream_reset_out: append an Outgoing-SSN-Reset-Request
 * parameter (RFC 6525) to the STREAM-RESET chunk in 'chk'.  Streams are
 * eligible when in SCTP_STREAM_RESET_PENDING with nothing queued or in
 * flight.  Encoding rule: an empty stream list means "reset ALL streams",
 * so when every stream is eligible number_entries is forced to 0.
 * Eligible streams move to SCTP_STREAM_RESET_IN_FLIGHT.  Also fixes up the
 * chunk length / book size.  NOTE(review): embedded line numbers are
 * non-contiguous — the return statements (the function reports whether a
 * request was added), braces and the 'at' counter lines are elided.
 */
12442 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
12443 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12445 uint16_t len, old_len, i;
12446 struct sctp_stream_reset_out_request *req_out;
12447 struct sctp_chunkhdr *ch;
12449 int number_entries=0;
12451 ch = mtod(chk->data, struct sctp_chunkhdr *);
12452 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12453 /* get to new offset for the param. */
12454 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12455 /* now how long will this param be? */
/* First pass: count streams that are pending reset and fully drained. */
12456 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12457 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12458 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12459 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12463 if (number_entries == 0) {
/* All streams eligible -> empty list on the wire means "all". */
12466 if (number_entries == stcb->asoc.streamoutcnt) {
12467 number_entries = 0;
12469 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
12470 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
12472 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12473 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12474 req_out->ph.param_length = htons(len);
12475 req_out->request_seq = htonl(seq);
12476 req_out->response_seq = htonl(resp_seq);
12477 req_out->send_reset_at_tsn = htonl(last_sent);
/* Second pass: emit the stream list (or mark all when the list is empty). */
12479 if (number_entries) {
12480 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12481 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12482 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12483 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12484 req_out->list_of_streams[at] = htons(i);
12486 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12487 if (at >= number_entries) {
12493 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12494 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12497 if (SCTP_SIZE32(len) > len) {
12499 * Need to worry about the pad we may end up adding to the
12500 * end. This is easy since the struct is either aligned to 4
12501 * bytes or 2 bytes off.
12503 req_out->list_of_streams[number_entries] = 0;
12505 /* now fix the chunk length */
12506 ch->chunk_length = htons(len + old_len);
12507 chk->book_size = len + old_len;
12508 chk->book_size_scale = 0;
12509 chk->send_size = SCTP_SIZE32(chk->book_size);
12510 SCTP_BUF_LEN(chk->data) = chk->send_size;
/*
 * sctp_add_stream_reset_in: append an Incoming-SSN-Reset-Request parameter
 * (RFC 6525) listing 'number_entries' streams from 'list' (empty list =
 * all streams) to the STREAM-RESET chunk in 'chk', then fix up the chunk
 * length and booking.  NOTE(review): embedded line numbers are
 * non-contiguous — braces and blank lines are elided in this listing.
 */
12515 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12516 int number_entries, uint16_t *list,
12519 uint16_t len, old_len, i;
12520 struct sctp_stream_reset_in_request *req_in;
12521 struct sctp_chunkhdr *ch;
12523 ch = mtod(chk->data, struct sctp_chunkhdr *);
12524 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12526 /* get to new offset for the param. */
12527 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12528 /* now how long will this param be? */
12529 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12530 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12531 req_in->ph.param_length = htons(len);
12532 req_in->request_seq = htonl(seq);
/* Stream ids go out in network byte order; an empty list means "all". */
12533 if (number_entries) {
12534 for (i = 0; i < number_entries; i++) {
12535 req_in->list_of_streams[i] = htons(list[i]);
12538 if (SCTP_SIZE32(len) > len) {
12540 * Need to worry about the pad we may end up adding to the
12541 * end. This is easy since the struct is either aligned to 4
12542 * bytes or 2 bytes off.
12544 req_in->list_of_streams[number_entries] = 0;
12546 /* now fix the chunk length */
12547 ch->chunk_length = htons(len + old_len);
12548 chk->book_size = len + old_len;
12549 chk->book_size_scale = 0;
12550 chk->send_size = SCTP_SIZE32(chk->book_size);
12551 SCTP_BUF_LEN(chk->data) = chk->send_size;
/*
 * sctp_add_stream_reset_tsn: append an SSN/TSN-Reset-Request parameter
 * (RFC 6525, fixed size, no stream list) to the STREAM-RESET chunk in
 * 'chk' and fix up the chunk length/booking.  NOTE(review): embedded line
 * numbers are non-contiguous — braces and blank lines are elided.
 */
12556 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12559 uint16_t len, old_len;
12560 struct sctp_stream_reset_tsn_request *req_tsn;
12561 struct sctp_chunkhdr *ch;
12563 ch = mtod(chk->data, struct sctp_chunkhdr *);
12564 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12566 /* get to new offset for the param. */
12567 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12568 /* now how long will this param be? */
12569 len = sizeof(struct sctp_stream_reset_tsn_request);
12570 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12571 req_tsn->ph.param_length = htons(len);
12572 req_tsn->request_seq = htonl(seq);
12574 /* now fix the chunk length */
12575 ch->chunk_length = htons(len + old_len);
12576 chk->send_size = len + old_len;
12577 chk->book_size = SCTP_SIZE32(chk->send_size);
12578 chk->book_size_scale = 0;
12579 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
/*
 * sctp_add_stream_reset_result: append a Reconfig-Response parameter
 * (RFC 6525) carrying 'result' for request 'resp_seq' to the STREAM-RESET
 * chunk in 'chk', then fix up the chunk length/booking.
 * NOTE(review): 'resp->result = ntohl(result)' below almost certainly
 * intends htonl(); the sibling sctp_add_stream_reset_result_tsn() uses
 * htonl().  Behavior is identical (both are the same byte swap on
 * little-endian, identity on big-endian), but the spelling is misleading.
 * NOTE(review): embedded line numbers are non-contiguous — braces and
 * blank lines are elided in this listing.
 */
12584 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12585 uint32_t resp_seq, uint32_t result)
12587 uint16_t len, old_len;
12588 struct sctp_stream_reset_response *resp;
12589 struct sctp_chunkhdr *ch;
12591 ch = mtod(chk->data, struct sctp_chunkhdr *);
12592 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12594 /* get to new offset for the param. */
12595 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12596 /* now how long will this param be? */
12597 len = sizeof(struct sctp_stream_reset_response);
12598 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12599 resp->ph.param_length = htons(len);
12600 resp->response_seq = htonl(resp_seq);
12601 resp->result = ntohl(result);
12603 /* now fix the chunk length */
12604 ch->chunk_length = htons(len + old_len);
12605 chk->book_size = len + old_len;
12606 chk->book_size_scale = 0;
12607 chk->send_size = SCTP_SIZE32(chk->book_size);
12608 SCTP_BUF_LEN(chk->data) = chk->send_size;
/*
 * sctp_send_deferred_reset_response: build and queue a STREAM-RESET chunk
 * containing only a response parameter for the deferred request 'ent',
 * with result code 'response'.  last_reset_action[0] is updated FIRST so
 * that even if the send fails, a retransmitted request from the peer gets
 * the new response.  No new response is queued while one of our own
 * reset requests is still outstanding.  NOTE(review): embedded line
 * numbers are non-contiguous — returns, braces and blank lines elided.
 */
12613 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
12614 struct sctp_stream_reset_list *ent,
12617 struct sctp_association *asoc;
12618 struct sctp_tmit_chunk *chk;
12619 struct sctp_chunkhdr *ch;
12621 asoc = &stcb->asoc;
12624 * Reset our last reset action to the new one IP -> response
12625 * (PERFORMED probably). This assures that if we fail to send, a
12626 * retran from the peer will get the new response.
12628 asoc->last_reset_action[0] = response;
12629 if (asoc->stream_reset_outstanding) {
12632 sctp_alloc_a_chunk(stcb, chk);
12634 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12637 chk->copy_by_ref = 0;
12638 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12639 chk->rec.chunk_id.can_take_data = 0;
12641 chk->asoc = &stcb->asoc;
/* Start with a bare chunk header; the response param is appended below. */
12642 chk->book_size = sizeof(struct sctp_chunkhdr);
12643 chk->send_size = SCTP_SIZE32(chk->book_size);
12644 chk->book_size_scale = 0;
12645 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12646 if (chk->data == NULL) {
12647 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12648 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12651 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12652 /* setup chunk parameters */
12653 chk->sent = SCTP_DATAGRAM_UNSENT;
12654 chk->snd_count = 0;
/* Prefer the alternate path when one is configured. */
12655 if (stcb->asoc.alternate) {
12656 chk->whoTo = stcb->asoc.alternate;
12658 chk->whoTo = stcb->asoc.primary_destination;
12660 ch = mtod(chk->data, struct sctp_chunkhdr *);
12661 ch->chunk_type = SCTP_STREAM_RESET;
12662 ch->chunk_flags = 0;
12663 ch->chunk_length = htons(chk->book_size);
12664 atomic_add_int(&chk->whoTo->ref_count, 1);
12665 SCTP_BUF_LEN(chk->data) = chk->send_size;
/* Append the response parameter, then queue the chunk for sending. */
12666 sctp_add_stream_reset_result(chk, ent->seq, response);
12667 /* insert the chunk for sending */
12668 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12671 asoc->ctrl_queue_cnt++;
/*
 * sctp_add_stream_reset_result_tsn: append the extended Reconfig-Response
 * parameter (RFC 6525) used for SSN/TSN reset requests — result plus the
 * sender's and receiver's next TSNs — to the STREAM-RESET chunk in 'chk',
 * then fix up the chunk length/booking.  NOTE(review): embedded line
 * numbers are non-contiguous — braces and blank lines are elided.
 */
12675 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12676 uint32_t resp_seq, uint32_t result,
12677 uint32_t send_una, uint32_t recv_next)
12679 uint16_t len, old_len;
12680 struct sctp_stream_reset_response_tsn *resp;
12681 struct sctp_chunkhdr *ch;
12683 ch = mtod(chk->data, struct sctp_chunkhdr *);
12684 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12686 /* get to new offset for the param. */
12687 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12688 /* now how long will this param be? */
12689 len = sizeof(struct sctp_stream_reset_response_tsn);
12690 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12691 resp->ph.param_length = htons(len);
12692 resp->response_seq = htonl(resp_seq);
12693 resp->result = htonl(result);
12694 resp->senders_next_tsn = htonl(send_una);
12695 resp->receivers_next_tsn = htonl(recv_next);
12697 /* now fix the chunk length */
12698 ch->chunk_length = htons(len + old_len);
12699 chk->book_size = len + old_len;
12700 chk->send_size = SCTP_SIZE32(chk->book_size);
12701 chk->book_size_scale = 0;
12702 SCTP_BUF_LEN(chk->data) = chk->send_size;
/*
 * sctp_add_an_out_stream: append an Add-Outgoing-Streams-Request parameter
 * (RFC 6525) asking the peer to accept 'adding' more outbound streams, to
 * the STREAM-RESET chunk in 'chk'; fixes up chunk length/booking.
 * NOTE(review): embedded line numbers are non-contiguous — the 'seq' and
 * 'adding' parameter lines, braces and blank lines are elided here.
 */
12707 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12711 uint16_t len, old_len;
12712 struct sctp_chunkhdr *ch;
12713 struct sctp_stream_reset_add_strm *addstr;
12715 ch = mtod(chk->data, struct sctp_chunkhdr *);
12716 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12718 /* get to new offset for the param. */
12719 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12720 /* now how long will this param be? */
12721 len = sizeof(struct sctp_stream_reset_add_strm);
/* Fill in the fixed-size add-streams request parameter. */
12724 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12725 addstr->ph.param_length = htons(len);
12726 addstr->request_seq = htonl(seq);
12727 addstr->number_of_streams = htons(adding);
12728 addstr->reserved = 0;
12730 /* now fix the chunk length */
12731 ch->chunk_length = htons(len + old_len);
12732 chk->send_size = len + old_len;
12733 chk->book_size = SCTP_SIZE32(chk->send_size);
12734 chk->book_size_scale = 0;
12735 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
/*
 * sctp_add_an_in_stream: append an Add-Incoming-Streams-Request parameter
 * (RFC 6525) asking the peer to open 'adding' more streams toward us, to
 * the STREAM-RESET chunk in 'chk'; fixes up chunk length/booking.
 * Mirror image of sctp_add_an_out_stream().  NOTE(review): embedded line
 * numbers are non-contiguous — the 'seq' and 'adding' parameter lines,
 * braces and blank lines are elided here.
 */
12740 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12744 uint16_t len, old_len;
12745 struct sctp_chunkhdr *ch;
12746 struct sctp_stream_reset_add_strm *addstr;
12748 ch = mtod(chk->data, struct sctp_chunkhdr *);
12749 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12751 /* get to new offset for the param. */
12752 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12753 /* now how long will this param be? */
12754 len = sizeof(struct sctp_stream_reset_add_strm);
/* Fill in the fixed-size add-streams request parameter. */
12756 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12757 addstr->ph.param_length = htons(len);
12758 addstr->request_seq = htonl(seq);
12759 addstr->number_of_streams = htons(adding);
12760 addstr->reserved = 0;
12762 /* now fix the chunk length */
12763 ch->chunk_length = htons(len + old_len);
12764 chk->send_size = len + old_len;
12765 chk->book_size = SCTP_SIZE32(chk->send_size);
12766 chk->book_size_scale = 0;
12767 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
/*
 * If no stream-reset request is currently outstanding, build a fresh
 * STREAM_RESET chunk carrying an outgoing-SSN reset request, queue it on
 * the association's control send queue and start the STRRESET timer.
 * Called with the TCB lock held (so_locked describes socket-lock state
 * for the free/sack paths).
 * NOTE(review): this extract is elided — early-return paths, 'seq'
 * declaration and several closing braces are missing from this view.
 */
12772 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12774 struct sctp_association *asoc;
12775 struct sctp_tmit_chunk *chk;
12776 struct sctp_chunkhdr *ch;
12779 asoc = &stcb->asoc;
12780 asoc->trigger_reset = 0;
/* Only one stream-reset request may be in flight at a time. */
12781 if (asoc->stream_reset_outstanding) {
12784 sctp_alloc_a_chunk(stcb, chk);
12786 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
/* Initialize the control chunk bookkeeping for a STREAM_RESET chunk. */
12789 chk->copy_by_ref = 0;
12790 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12791 chk->rec.chunk_id.can_take_data = 0;
12793 chk->asoc = &stcb->asoc;
12794 chk->book_size = sizeof(struct sctp_chunkhdr);
12795 chk->send_size = SCTP_SIZE32(chk->book_size);
12796 chk->book_size_scale = 0;
12797 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12798 if (chk->data == NULL) {
12799 sctp_free_a_chunk(stcb, chk, so_locked);
12800 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
/* Leave room up front so lower layers can prepend headers in place. */
12803 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12805 /* setup chunk parameters */
12806 chk->sent = SCTP_DATAGRAM_UNSENT;
12807 chk->snd_count = 0;
/* Prefer the alternate net (e.g. during failover) over the primary. */
12808 if (stcb->asoc.alternate) {
12809 chk->whoTo = stcb->asoc.alternate;
12811 chk->whoTo = stcb->asoc.primary_destination;
12813 ch = mtod(chk->data, struct sctp_chunkhdr *);
12814 ch->chunk_type = SCTP_STREAM_RESET;
12815 ch->chunk_flags = 0;
12816 ch->chunk_length = htons(chk->book_size);
12817 atomic_add_int(&chk->whoTo->ref_count, 1);
12818 SCTP_BUF_LEN(chk->data) = chk->send_size;
12819 seq = stcb->asoc.str_reset_seq_out;
/* Add the outgoing-reset request param; on success it becomes outstanding. */
12820 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12822 asoc->stream_reset_outstanding++;
/* presumably the failure path: release the chunk — TODO confirm against full source */
12824 m_freem(chk->data);
12826 sctp_free_a_chunk(stcb, chk, so_locked);
12829 asoc->str_reset = chk;
12830 /* insert the chunk for sending */
12831 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12834 asoc->ctrl_queue_cnt++;
/* Bundle a pending SACK with the reset request when one is due. */
12836 if (stcb->asoc.send_sack) {
12837 sctp_send_sack(stcb, so_locked);
12839 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
/*
 * Build and queue a STREAM_RESET chunk combining whichever RFC 6525
 * requests the caller asked for: outgoing/incoming stream resets
 * (number_entries/list), a TSN reset (send_tsn_req), and/or requests to
 * add outgoing (add_stream & 1, adding_o) or incoming (add_stream & 2,
 * adding_i) streams.  Also grows the local strmout array when more
 * outgoing streams are being added than currently allocated.
 * Returns 0 on success or an errno (EBUSY if a request is already
 * outstanding, EINVAL/ENOMEM on bad arguments or allocation failure).
 * NOTE(review): this extract is elided — the 'adding_o' parameter,
 * 'seq'/'ret'/'i'/'j' declarations, several returns and closing braces
 * are missing from this view; comments describe only what is visible.
 */
12844 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12845 uint16_t number_entries, uint16_t *list,
12846 uint8_t send_in_req,
12847 uint8_t send_tsn_req,
12848 uint8_t add_stream,
12850 uint16_t adding_i, uint8_t peer_asked)
12852 struct sctp_association *asoc;
12853 struct sctp_tmit_chunk *chk;
12854 struct sctp_chunkhdr *ch;
12855 int can_send_out_req=0;
12858 asoc = &stcb->asoc;
/* Only one stream-reset request may be outstanding per association. */
12859 if (asoc->stream_reset_outstanding) {
12861 * Already one pending, must get ACK back to clear the flag.
12863 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12866 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12867 (add_stream == 0)) {
12868 /* nothing to do */
12869 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
/* TSN reset and incoming-stream reset are mutually exclusive requests. */
12872 if (send_tsn_req && send_in_req) {
12873 /* error, can't do that */
12874 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12876 } else if (send_in_req) {
12877 can_send_out_req = 1;
/* Reject stream lists that cannot fit in one mbuf cluster. */
12879 if (number_entries > (MCLBYTES -
12880 SCTP_MIN_OVERHEAD -
12881 sizeof(struct sctp_chunkhdr) -
12882 sizeof(struct sctp_stream_reset_out_request)) /
12883 sizeof(uint16_t)) {
12884 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12887 sctp_alloc_a_chunk(stcb, chk);
12889 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
/* Initialize the control chunk; starts as a bare chunk header. */
12892 chk->copy_by_ref = 0;
12893 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12894 chk->rec.chunk_id.can_take_data = 0;
12896 chk->asoc = &stcb->asoc;
12897 chk->book_size = sizeof(struct sctp_chunkhdr);
12898 chk->send_size = SCTP_SIZE32(chk->book_size);
12899 chk->book_size_scale = 0;
12900 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12901 if (chk->data == NULL) {
12902 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12903 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12906 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12908 /* setup chunk parameters */
12909 chk->sent = SCTP_DATAGRAM_UNSENT;
12910 chk->snd_count = 0;
12911 if (stcb->asoc.alternate) {
12912 chk->whoTo = stcb->asoc.alternate;
12914 chk->whoTo = stcb->asoc.primary_destination;
12916 atomic_add_int(&chk->whoTo->ref_count, 1);
12917 ch = mtod(chk->data, struct sctp_chunkhdr *);
12918 ch->chunk_type = SCTP_STREAM_RESET;
12919 ch->chunk_flags = 0;
12920 ch->chunk_length = htons(chk->book_size);
12921 SCTP_BUF_LEN(chk->data) = chk->send_size;
12923 seq = stcb->asoc.str_reset_seq_out;
12924 if (can_send_out_req) {
12926 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12929 asoc->stream_reset_outstanding++;
/* Adding outgoing streams: grow the local strmout array if needed. */
12932 if ((add_stream & 1) &&
12933 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12934 /* Need to allocate more */
12935 struct sctp_stream_out *oldstream;
12936 struct sctp_stream_queue_pending *sp, *nsp;
12938 #if defined(SCTP_DETAILED_STR_STATS)
12942 oldstream = stcb->asoc.strmout;
12943 /* get some more */
12944 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12945 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12947 if (stcb->asoc.strmout == NULL) {
/* Allocation failed: keep the old array and drop the add-out request bit. */
12949 stcb->asoc.strmout = oldstream;
12950 /* Turn off the bit */
12951 x = add_stream & 0xfe;
12955 /* Ok now we proceed with copying the old out stuff and
12956 * initializing the new stuff.
12958 SCTP_TCB_SEND_LOCK(stcb);
12959 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
/* Migrate every existing stream's state and queued messages. */
12960 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12961 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12962 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12963 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12964 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12965 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12966 stcb->asoc.strmout[i].sid = i;
12967 stcb->asoc.strmout[i].state = oldstream[i].state;
12968 /* FIX ME FIX ME */
12969 /* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
12970 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12971 /* now anything on those queues? */
12972 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12973 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12974 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12978 /* now the new streams */
12979 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
/* Zero-initialize each newly added stream; they start CLOSED. */
12980 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12981 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12982 stcb->asoc.strmout[i].chunks_on_queues = 0;
12983 #if defined(SCTP_DETAILED_STR_STATS)
12984 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12985 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12986 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12989 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12990 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12992 stcb->asoc.strmout[i].next_mid_ordered = 0;
12993 stcb->asoc.strmout[i].next_mid_unordered = 0;
12994 stcb->asoc.strmout[i].sid = i;
12995 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12996 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12997 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12999 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
13000 SCTP_FREE(oldstream, SCTP_M_STRMO);
13001 SCTP_TCB_SEND_UNLOCK(stcb);
/* Append the requested parameters to the chunk being built. */
13004 if ((add_stream & 1) && (adding_o > 0)) {
13005 asoc->strm_pending_add_size = adding_o;
13006 asoc->peer_req_out = peer_asked;
13007 sctp_add_an_out_stream(chk, seq, adding_o);
13009 asoc->stream_reset_outstanding++;
13011 if ((add_stream & 2) && (adding_i > 0)) {
13012 sctp_add_an_in_stream(chk, seq, adding_i);
13014 asoc->stream_reset_outstanding++;
13017 sctp_add_stream_reset_in(chk, number_entries, list, seq);
13019 asoc->stream_reset_outstanding++;
13021 if (send_tsn_req) {
13022 sctp_add_stream_reset_tsn(chk, seq);
13023 asoc->stream_reset_outstanding++;
/* Queue the finished chunk and arm the stream-reset retransmit timer. */
13025 asoc->str_reset = chk;
13026 /* insert the chunk for sending */
13027 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
13030 asoc->ctrl_queue_cnt++;
13031 if (stcb->asoc.send_sack) {
13032 sctp_send_sack(stcb, SCTP_SO_LOCKED);
13034 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
/*
 * Send an out-of-the-blue ABORT in response to packet 'm' (headers at
 * 'iphlen'), unless that packet itself contains an ABORT — never answer
 * an ABORT with an ABORT.  The optional 'cause' mbuf carries error-cause
 * TLVs and is consumed either way.  Delegates the actual send to
 * sctp_send_resp_msg() with SCTP_ABORT_ASSOCIATION.
 * NOTE(review): this extract is elided — return statements, closing
 * braces and the non-FreeBSD argument variant are missing from this view.
 */
13039 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
13040 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13041 #if defined(__FreeBSD__) && !defined(__Userspace__)
13042 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13044 uint32_t vrf_id, uint16_t port)
13046 /* Don't respond to an ABORT with an ABORT. */
13047 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
13049 sctp_m_freem(cause);
13052 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
13053 #if defined(__FreeBSD__) && !defined(__Userspace__)
13054 mflowtype, mflowid, fibnum,
/*
 * Send an OPERATION_ERROR chunk to the given peer, carrying the
 * error-cause TLVs in 'cause'.  Thin wrapper around sctp_send_resp_msg()
 * with SCTP_OPERATION_ERROR as the chunk type.
 * NOTE(review): this extract is elided — the trailing arguments of the
 * sctp_send_resp_msg() call and the closing brace are missing here.
 */
13061 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
13062 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13063 #if defined(__FreeBSD__) && !defined(__Userspace__)
13064 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13066 uint32_t vrf_id, uint16_t port)
13068 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
13069 #if defined(__FreeBSD__) && !defined(__Userspace__)
13070 mflowtype, mflowid, fibnum,
/*
 * Copy up to max_send_len bytes of user data from 'uio' into a fresh
 * mbuf chain; reports bytes copied via *sndout and the chain's last mbuf
 * via *new_tail.  On FreeBSD/userspace this is a single m_uiotombuf()
 * call; other platforms build the chain by hand with uiomove().
 * NOTE(review): this extract is elided — error-path returns, loop
 * structure and closing braces are missing from this view.
 */
13076 static struct mbuf *
13077 sctp_copy_resume(struct uio *uio,
13079 #if defined(__FreeBSD__) || defined(__Userspace__)
13080 int user_marks_eor,
13084 struct mbuf **new_tail)
13086 #if defined(__FreeBSD__) || defined(__Userspace__)
/* Fast path: let the kernel build the whole chain in one call; mark EOR
 * on the final mbuf when the app uses explicit-EOR mode. */
13089 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
13090 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
13092 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13095 *sndout = m_length(m, NULL);
13096 *new_tail = m_last(m);
/* Manual path for platforms without m_uiotombuf(). */
13100 int left, cancpy, willcpy;
13101 struct mbuf *m, *head;
13103 #if defined(__APPLE__) && !defined(__Userspace__)
13104 #if defined(APPLE_LEOPARD)
13105 left = (int)min(uio->uio_resid, max_send_len);
13107 left = (int)min(uio_resid(uio), max_send_len);
13110 left = (int)min(uio->uio_resid, max_send_len);
13112 /* Always get a header just in case */
13113 head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13114 if (head == NULL) {
13115 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13119 cancpy = (int)M_TRAILINGSPACE(head);
13120 willcpy = min(cancpy, left);
13121 *error = uiomove(mtod(head, caddr_t), willcpy, uio);
/* uiomove failure: free the partial chain. */
13123 sctp_m_freem(head);
13126 *sndout += willcpy;
13128 SCTP_BUF_LEN(head) = willcpy;
/* Keep appending mbufs until 'left' bytes have been moved in. */
13132 /* move in user data */
13133 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13134 if (SCTP_BUF_NEXT(m) == NULL) {
13135 sctp_m_freem(head);
13137 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13141 m = SCTP_BUF_NEXT(m);
13142 cancpy = (int)M_TRAILINGSPACE(m);
13143 willcpy = min(cancpy, left);
13144 *error = uiomove(mtod(m, caddr_t), willcpy, uio);
13146 sctp_m_freem(head);
13148 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13152 SCTP_BUF_LEN(m) = willcpy;
13154 *sndout += willcpy;
13157 SCTP_BUF_NEXT(m) = NULL;
/*
 * Copy sp->length bytes of user data from 'uio' into sp->data, reserving
 * resv_upfront bytes of headroom in the first mbuf for the DATA chunk
 * header.  Sets sp->tail_mbuf to the last mbuf of the chain.  On
 * FreeBSD/userspace this is one m_uiotombuf() call; other platforms
 * build the chain manually with uiomove().
 * NOTE(review): this extract is elided — return statements, the copy
 * loop's control lines and closing braces are missing from this view.
 */
13165 sctp_copy_one(struct sctp_stream_queue_pending *sp,
13169 #if defined(__FreeBSD__) || defined(__Userspace__)
13170 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
13172 if (sp->data == NULL) {
13173 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13177 sp->tail_mbuf = m_last(sp->data);
/* Manual path for platforms without m_uiotombuf(). */
13181 int cancpy, willcpy, error;
13182 struct mbuf *m, *head;
13185 /* First one gets a header */
13187 head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
13189 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13193 * Add this one for m in now, that way if the alloc fails we won't
13196 SCTP_BUF_RESV_UF(m, resv_upfront);
13197 cancpy = (int)M_TRAILINGSPACE(m);
13198 willcpy = min(cancpy, left);
13200 /* move in user data */
13201 error = uiomove(mtod(m, caddr_t), willcpy, uio);
13203 sctp_m_freem(head);
13206 SCTP_BUF_LEN(m) = willcpy;
/* Extend the chain while more user data remains to be copied. */
13210 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13211 if (SCTP_BUF_NEXT(m) == NULL) {
13213 * the head goes back to caller, he can free
13216 sctp_m_freem(head);
13217 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13220 m = SCTP_BUF_NEXT(m);
13221 cancpy = (int)M_TRAILINGSPACE(m);
13222 willcpy = min(cancpy, left);
13225 SCTP_BUF_NEXT(m) = NULL;
/*
 * Allocate and fill a stream-queue-pending entry (sp) describing up to
 * max_send_len bytes of the user's message from 'uio': copies the send
 * info from 'srcv', decides whether the message is complete, acquires an
 * auth key reference when DATA must be authenticated, and copies the
 * payload via sctp_copy_one().  Returns the new sp, or NULL with *error
 * set (ECONNRESET while shutting down, ENOMEM on allocation failure).
 * NOTE(review): this extract is elided — several returns, the sp-NULL
 * check, error-path braces and the function tail are missing from this
 * view; comments describe only what is visible.
 */
13236 static struct sctp_stream_queue_pending *
13237 sctp_copy_it_in(struct sctp_tcb *stcb,
13238 struct sctp_association *asoc,
13239 struct sctp_sndrcvinfo *srcv,
13241 struct sctp_nets *net,
13242 ssize_t max_send_len,
13243 int user_marks_eor,
13248 * This routine must be very careful in its work. Protocol
13249 * processing is up and running so care must be taken to spl...()
13250 * when you need to do something that may effect the stcb/asoc. The
13251 * sb is locked however. When data is copied the protocol processing
13252 * should be enabled since this is a slower operation...
13254 struct sctp_stream_queue_pending *sp = NULL;
13258 /* Now can we send this? */
/* Refuse new data once any form of shutdown is in progress. */
13259 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13260 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13261 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13262 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13263 /* got data while shutting down */
13264 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13265 *error = ECONNRESET;
13268 sctp_alloc_a_strmoq(stcb, sp);
13270 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
/* Copy the caller's send info into the pending entry. */
13275 sp->sender_all_done = 0;
13276 sp->sinfo_flags = srcv->sinfo_flags;
13277 sp->timetolive = srcv->sinfo_timetolive;
13278 sp->ppid = srcv->sinfo_ppid;
13279 sp->context = srcv->sinfo_context;
13281 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
13283 sp->sid = srcv->sinfo_stream;
13284 #if defined(__APPLE__) && !defined(__Userspace__)
13285 #if defined(APPLE_LEOPARD)
13286 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13288 sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
13291 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
/* The message is complete if this copy drains the uio and either EOR
 * semantics are implicit or the app explicitly marked EOF/EOR. */
13293 #if defined(__APPLE__) && !defined(__Userspace__)
13294 #if defined(APPLE_LEOPARD)
13295 if ((sp->length == (uint32_t)uio->uio_resid) &&
13297 if ((sp->length == (uint32_t)uio_resid(uio)) &&
13300 if ((sp->length == (uint32_t)uio->uio_resid) &&
13302 ((user_marks_eor == 0) ||
13303 (srcv->sinfo_flags & SCTP_EOF) ||
13304 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13305 sp->msg_is_complete = 1;
13307 sp->msg_is_complete = 0;
13309 sp->sender_all_done = 0;
13310 sp->some_taken = 0;
13311 sp->put_last_out = 0;
13312 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
13313 sp->data = sp->tail_mbuf = NULL;
13314 if (sp->length == 0) {
/* Pick the auth key: caller-supplied key number or the active one. */
13317 if (srcv->sinfo_keynumber_valid) {
13318 sp->auth_keyid = srcv->sinfo_keynumber;
13320 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
13322 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
13323 sctp_auth_key_acquire(stcb, sp->auth_keyid);
13324 sp->holds_key_ref = 1;
/* Drop the socket lock around the (possibly sleeping) data copy. */
13326 #if defined(__APPLE__) && !defined(__Userspace__)
13327 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13329 *error = sctp_copy_one(sp, uio, resv_in_first);
13330 #if defined(__APPLE__) && !defined(__Userspace__)
13331 SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
/* presumably the copy-failure path: free the pending entry — TODO confirm */
13335 #if defined(__Userspace__)
13336 SCTP_TCB_LOCK(stcb);
13338 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
13339 #if defined(__Userspace__)
13340 SCTP_TCB_UNLOCK(stcb);
13344 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
13346 atomic_add_int(&sp->net->ref_count, 1);
13350 sctp_set_prsctp_policy(sp);
/*
 * Socket-layer send entry point for SCTP.  Extracts any SCTP_SNDRCV
 * cmsg from 'control' into a local sndrcvinfo, converts a v4-mapped
 * IPv6 destination address to plain IPv4, and hands the send off to
 * sctp_lower_sosend().  On Apple the socket lock is taken/released
 * around the whole operation.
 * NOTE(review): this extract is elided — parts of the parameter list,
 * the platform-specific proc/thread argument variants and the return
 * are missing from this view.
 */
13358 sctp_sosend(struct socket *so,
13359 struct sockaddr *addr,
13362 struct mbuf *control,
13363 #if defined(__APPLE__) && !defined(__Userspace__)
13367 #if defined(__FreeBSD__) && !defined(__Userspace__)
13369 #elif defined(_WIN32) && !defined(__Userspace__)
13372 #if defined(__Userspace__)
13374 * proc is a dummy in __Userspace__ and will not be passed
13375 * to sctp_lower_sosend
13383 #if defined(__APPLE__) && !defined(__Userspace__)
13384 struct proc *p = current_proc();
13386 int error, use_sndinfo = 0;
13387 struct sctp_sndrcvinfo sndrcvninfo;
13388 struct sockaddr *addr_to_use;
13389 #if defined(INET) && defined(INET6)
13390 struct sockaddr_in sin;
13393 #if defined(__APPLE__) && !defined(__Userspace__)
13394 SCTP_SOCKET_LOCK(so, 1);
13397 /* process cmsg snd/rcv info (maybe a assoc-id) */
13398 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
13399 sizeof(sndrcvninfo))) {
13404 addr_to_use = addr;
/* A v4-mapped IPv6 address is rewritten as a real IPv4 sockaddr so the
 * lower layers deal with a single address family. */
13405 #if defined(INET) && defined(INET6)
13406 if ((addr) && (addr->sa_family == AF_INET6)) {
13407 struct sockaddr_in6 *sin6;
13409 sin6 = (struct sockaddr_in6 *)addr;
13410 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13411 in6_sin6_2_sin(&sin, sin6);
13412 addr_to_use = (struct sockaddr *)&sin;
13416 error = sctp_lower_sosend(so, addr_to_use, uio, top,
13419 use_sndinfo ? &sndrcvninfo: NULL
13420 #if !defined(__Userspace__)
13424 #if defined(__APPLE__) && !defined(__Userspace__)
13425 SCTP_SOCKET_UNLOCK(so, 1);
13432 sctp_lower_sosend(struct socket *so,
13433 struct sockaddr *addr,
13435 struct mbuf *i_pak,
13436 struct mbuf *control,
13438 struct sctp_sndrcvinfo *srcv
13439 #if !defined(__Userspace__)
13441 #if defined(__FreeBSD__)
13443 #elif defined(_WIN32)
13451 #if defined(__FreeBSD__) && !defined(__Userspace__)
13452 struct epoch_tracker et;
13454 ssize_t sndlen = 0, max_len, local_add_more;
13456 struct mbuf *top = NULL;
13457 int queue_only = 0, queue_only_for_init = 0;
13458 int free_cnt_applied = 0;
13460 int now_filled = 0;
13461 unsigned int inqueue_bytes = 0;
13462 struct sctp_block_entry be;
13463 struct sctp_inpcb *inp;
13464 struct sctp_tcb *stcb = NULL;
13465 struct timeval now;
13466 struct sctp_nets *net;
13467 struct sctp_association *asoc;
13468 struct sctp_inpcb *t_inp;
13469 int user_marks_eor;
13470 int create_lock_applied = 0;
13471 int nagle_applies = 0;
13472 int some_on_control = 0;
13473 int got_all_of_the_send = 0;
13474 int hold_tcblock = 0;
13475 int non_blocking = 0;
13476 ssize_t local_soresv = 0;
13478 uint16_t sinfo_flags;
13479 sctp_assoc_t sinfo_assoc_id;
13486 #if defined(__APPLE__) && !defined(__Userspace__)
13487 sctp_lock_assert(so);
13489 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13491 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13494 SCTP_RELEASE_PKT(i_pak);
13498 if ((uio == NULL) && (i_pak == NULL)) {
13499 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13502 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13503 atomic_add_int(&inp->total_sends, 1);
13505 #if defined(__APPLE__) && !defined(__Userspace__)
13506 #if defined(APPLE_LEOPARD)
13507 if (uio->uio_resid < 0) {
13509 if (uio_resid(uio) < 0) {
13512 if (uio->uio_resid < 0) {
13514 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13517 #if defined(__APPLE__) && !defined(__Userspace__)
13518 #if defined(APPLE_LEOPARD)
13519 sndlen = uio->uio_resid;
13521 sndlen = uio_resid(uio);
13524 sndlen = uio->uio_resid;
13527 top = SCTP_HEADER_TO_CHAIN(i_pak);
13528 sndlen = SCTP_HEADER_LEN(i_pak);
13530 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
13533 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13534 SCTP_IS_LISTENING(inp)) {
13535 /* The listener can NOT send */
13536 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13541 * Pre-screen address, if one is given the sin-len
13542 * must be set correctly!
13545 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13546 switch (raddr->sa.sa_family) {
13549 #ifdef HAVE_SIN_LEN
13550 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13551 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13556 port = raddr->sin.sin_port;
13561 #ifdef HAVE_SIN6_LEN
13562 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13563 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13568 port = raddr->sin6.sin6_port;
13571 #if defined(__Userspace__)
13573 #ifdef HAVE_SCONN_LEN
13574 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13575 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13580 port = raddr->sconn.sconn_port;
13584 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13585 error = EAFNOSUPPORT;
13592 sinfo_flags = srcv->sinfo_flags;
13593 sinfo_assoc_id = srcv->sinfo_assoc_id;
13594 if (INVALID_SINFO_FLAG(sinfo_flags) ||
13595 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13596 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13600 if (srcv->sinfo_flags)
13601 SCTP_STAT_INCR(sctps_sends_with_flags);
13603 sinfo_flags = inp->def_send.sinfo_flags;
13604 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13606 #if defined(__FreeBSD__) && !defined(__Userspace__)
13607 if (flags & MSG_EOR) {
13608 sinfo_flags |= SCTP_EOR;
13610 if (flags & MSG_EOF) {
13611 sinfo_flags |= SCTP_EOF;
13614 if (sinfo_flags & SCTP_SENDALL) {
13615 /* its a sendall */
13616 error = sctp_sendall(inp, uio, top, srcv);
13620 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13621 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13625 /* now we must find the assoc */
13626 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13627 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13628 SCTP_INP_RLOCK(inp);
13629 stcb = LIST_FIRST(&inp->sctp_asoc_list);
13631 SCTP_TCB_LOCK(stcb);
13634 SCTP_INP_RUNLOCK(inp);
13635 } else if (sinfo_assoc_id) {
13636 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
13637 if (stcb != NULL) {
13642 * Since we did not use findep we must
13643 * increment it, and if we don't find a tcb
13646 SCTP_INP_WLOCK(inp);
13647 SCTP_INP_INCR_REF(inp);
13648 SCTP_INP_WUNLOCK(inp);
13649 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13650 if (stcb == NULL) {
13651 SCTP_INP_WLOCK(inp);
13652 SCTP_INP_DECR_REF(inp);
13653 SCTP_INP_WUNLOCK(inp);
13658 if ((stcb == NULL) && (addr)) {
13659 /* Possible implicit send? */
13660 SCTP_ASOC_CREATE_LOCK(inp);
13661 create_lock_applied = 1;
13662 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13663 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13664 /* Should I really unlock ? */
13665 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13670 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13671 (addr->sa_family == AF_INET6)) {
13672 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13676 SCTP_INP_WLOCK(inp);
13677 SCTP_INP_INCR_REF(inp);
13678 SCTP_INP_WUNLOCK(inp);
13679 /* With the lock applied look again */
13680 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13681 #if defined(INET) || defined(INET6)
13682 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13683 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13686 if (stcb == NULL) {
13687 SCTP_INP_WLOCK(inp);
13688 SCTP_INP_DECR_REF(inp);
13689 SCTP_INP_WUNLOCK(inp);
13696 if (t_inp != inp) {
13697 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13702 if (stcb == NULL) {
13703 if (addr == NULL) {
13704 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13708 /* We must go ahead and start the INIT process */
13711 if ((sinfo_flags & SCTP_ABORT) ||
13712 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13714 * User asks to abort a non-existant assoc,
13715 * or EOF a non-existant assoc with no data
13717 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13721 /* get an asoc/stcb struct */
13722 vrf_id = inp->def_vrf_id;
13724 if (create_lock_applied == 0) {
13725 panic("Error, should hold create lock and I don't?");
13728 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13729 inp->sctp_ep.pre_open_stream_count,
13731 #if !defined(__Userspace__)
13734 (struct proc *)NULL,
13736 SCTP_INITIALIZE_AUTH_PARAMS);
13737 if (stcb == NULL) {
13738 /* Error is setup for us in the call */
13741 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13742 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13743 /* Set the connected flag so we can queue data */
13744 soisconnecting(so);
13747 if (create_lock_applied) {
13748 SCTP_ASOC_CREATE_UNLOCK(inp);
13749 create_lock_applied = 0;
13751 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13753 /* Turn on queue only flag to prevent data from being sent */
13755 asoc = &stcb->asoc;
13756 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13757 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13760 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13761 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
13762 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
13768 /* out with the INIT */
13769 queue_only_for_init = 1;
13771 * we may want to dig in after this call and adjust the MTU
13772 * value. It defaulted to 1500 (constant) but the ro
13773 * structure may now have an update and thus we may need to
13774 * change it BEFORE we append the message.
13778 asoc = &stcb->asoc;
13779 if (srcv == NULL) {
13780 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13781 sinfo_flags = srcv->sinfo_flags;
13782 #if defined(__FreeBSD__) && !defined(__Userspace__)
13783 if (flags & MSG_EOR) {
13784 sinfo_flags |= SCTP_EOR;
13786 if (flags & MSG_EOF) {
13787 sinfo_flags |= SCTP_EOF;
13791 if (sinfo_flags & SCTP_ADDR_OVER) {
13793 net = sctp_findnet(stcb, addr);
13796 if ((net == NULL) ||
13797 ((port != 0) && (port != stcb->rport))) {
13798 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13803 if (stcb->asoc.alternate) {
13804 net = stcb->asoc.alternate;
13806 net = stcb->asoc.primary_destination;
13809 atomic_add_int(&stcb->total_sends, 1);
13810 /* Keep the stcb from being freed under our feet */
13811 atomic_add_int(&asoc->refcnt, 1);
13812 free_cnt_applied = 1;
13814 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13815 if (sndlen > (ssize_t)asoc->smallest_mtu) {
13816 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13821 #if defined(__Userspace__)
13822 if (inp->recv_callback) {
13826 if (SCTP_SO_IS_NBIO(so)
13827 #if defined(__FreeBSD__) && !defined(__Userspace__)
13828 || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
13833 /* would we block? */
13834 if (non_blocking) {
13837 if (hold_tcblock == 0) {
13838 SCTP_TCB_LOCK(stcb);
13841 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13842 if (user_marks_eor == 0) {
13847 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13848 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13849 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13850 if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
13853 error = EWOULDBLOCK;
13856 stcb->asoc.sb_send_resv += (uint32_t)sndlen;
13857 SCTP_TCB_UNLOCK(stcb);
13860 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13862 local_soresv = sndlen;
13863 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13864 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13865 error = ECONNRESET;
13868 if (create_lock_applied) {
13869 SCTP_ASOC_CREATE_UNLOCK(inp);
13870 create_lock_applied = 0;
13872 /* Is the stream no. valid? */
13873 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13874 /* Invalid stream number */
13875 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13879 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
13880 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
13882 * Can't queue any data while stream reset is underway.
13884 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
13889 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
13892 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13893 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13896 /* we are now done with all control */
13898 sctp_m_freem(control);
13901 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13902 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13903 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13904 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13905 if (sinfo_flags & SCTP_ABORT) {
13908 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13909 error = ECONNRESET;
13913 /* Ok, we will attempt a msgsnd :> */
13914 #if !(defined(_WIN32) || defined(__Userspace__))
13916 #if defined(__FreeBSD__)
13917 p->td_ru.ru_msgsnd++;
13919 p->p_stats->p_ru.ru_msgsnd++;
13923 /* Are we aborting? */
13924 if (sinfo_flags & SCTP_ABORT) {
13926 ssize_t tot_demand, tot_out = 0, max_out;
13928 SCTP_STAT_INCR(sctps_sends_with_abort);
13929 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13930 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13931 /* It has to be up before we abort */
13932 /* how big is the user initiated abort? */
13933 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13937 if (hold_tcblock) {
13938 SCTP_TCB_UNLOCK(stcb);
13942 struct mbuf *cntm = NULL;
13944 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13946 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13947 tot_out += SCTP_BUF_LEN(cntm);
13951 /* Must fit in a MTU */
13953 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13954 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13956 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13960 mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
13963 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13967 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13968 max_out -= sizeof(struct sctp_abort_msg);
13969 if (tot_out > max_out) {
13973 struct sctp_paramhdr *ph;
13975 /* now move forward the data pointer */
13976 ph = mtod(mm, struct sctp_paramhdr *);
13977 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13978 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
13980 SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
13982 #if defined(__APPLE__) && !defined(__Userspace__)
13983 SCTP_SOCKET_UNLOCK(so, 0);
13985 error = uiomove((caddr_t)ph, (int)tot_out, uio);
13986 #if defined(__APPLE__) && !defined(__Userspace__)
13987 SCTP_SOCKET_LOCK(so, 0);
13991 * Here if we can't get his data we
13992 * still abort we just don't get to
13993 * send the users note :-0
14000 SCTP_BUF_NEXT(mm) = top;
14004 if (hold_tcblock == 0) {
14005 SCTP_TCB_LOCK(stcb);
14007 atomic_add_int(&stcb->asoc.refcnt, -1);
14008 free_cnt_applied = 0;
14009 /* release this lock, otherwise we hang on ourselves */
14010 #if defined(__FreeBSD__) && !defined(__Userspace__)
14011 NET_EPOCH_ENTER(et);
14013 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
14014 #if defined(__FreeBSD__) && !defined(__Userspace__)
14015 NET_EPOCH_EXIT(et);
14017 /* now relock the stcb so everything is sane */
14020 /* In this case top is already chained to mm
14021 * avoid double free, since we free it below if
14022 * top != NULL and driver would free it after sending
14030 /* Calculate the maximum we can send */
14031 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14032 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14033 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14037 if (hold_tcblock) {
14038 SCTP_TCB_UNLOCK(stcb);
14041 if (asoc->strmout == NULL) {
14042 /* huh? software error */
14043 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
14048 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
14049 if ((user_marks_eor == 0) &&
14050 (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
14051 /* It will NEVER fit */
14052 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14056 if ((uio == NULL) && user_marks_eor) {
14058 * We do not support eeor mode for
14059 * sending with mbuf chains (like sendfile).
14061 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14066 if (user_marks_eor) {
14067 local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
14070 * For non-eeor the whole message must fit in
14071 * the socket send buffer.
14073 local_add_more = sndlen;
14076 if (non_blocking) {
14077 goto skip_preblock;
14079 if (((max_len <= local_add_more) &&
14080 ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
14082 ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14083 /* No room right now ! */
14084 SOCKBUF_LOCK(&so->so_snd);
14085 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14086 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
14087 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14088 SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
14089 (unsigned int)SCTP_SB_LIMIT_SND(so),
14092 stcb->asoc.stream_queue_cnt,
14093 stcb->asoc.chunks_on_out_queue,
14094 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
14095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14096 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
14099 #if !(defined(_WIN32) && !defined(__Userspace__))
14100 stcb->block_entry = &be;
14102 error = sbwait(&so->so_snd);
14103 stcb->block_entry = NULL;
14104 if (error || so->so_error || be.error) {
14107 error = so->so_error;
14112 SOCKBUF_UNLOCK(&so->so_snd);
14115 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14116 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14117 asoc, stcb->asoc.total_output_queue_size);
14119 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14120 SOCKBUF_UNLOCK(&so->so_snd);
14123 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14125 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14126 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14130 SOCKBUF_UNLOCK(&so->so_snd);
14134 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14137 #if defined(__APPLE__) && !defined(__Userspace__)
14138 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14140 /* sndlen covers for mbuf case
14141 * uio_resid covers for the non-mbuf case
14142 * NOTE: uio will be null when top/mbuf is passed
14145 if (sinfo_flags & SCTP_EOF) {
14146 got_all_of_the_send = 1;
14149 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14155 struct sctp_stream_queue_pending *sp;
14156 struct sctp_stream_out *strm;
14159 SCTP_TCB_SEND_LOCK(stcb);
14160 if ((asoc->stream_locked) &&
14161 (asoc->stream_locked_on != srcv->sinfo_stream)) {
14162 SCTP_TCB_SEND_UNLOCK(stcb);
14163 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14167 SCTP_TCB_SEND_UNLOCK(stcb);
14169 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
14170 if (strm->last_msg_incomplete == 0) {
14172 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
14176 SCTP_TCB_SEND_LOCK(stcb);
14177 if (sp->msg_is_complete) {
14178 strm->last_msg_incomplete = 0;
14179 asoc->stream_locked = 0;
14181 /* Just got locked to this guy in
14182 * case of an interrupt.
14184 strm->last_msg_incomplete = 1;
14185 if (stcb->asoc.idata_supported == 0) {
14186 asoc->stream_locked = 1;
14187 asoc->stream_locked_on = srcv->sinfo_stream;
14189 sp->sender_all_done = 0;
14191 sctp_snd_sb_alloc(stcb, sp->length);
14192 atomic_add_int(&asoc->stream_queue_cnt, 1);
14193 if (sinfo_flags & SCTP_UNORDERED) {
14194 SCTP_STAT_INCR(sctps_sends_with_unord);
14196 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
14197 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
14198 SCTP_TCB_SEND_UNLOCK(stcb);
14200 SCTP_TCB_SEND_LOCK(stcb);
14201 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
14202 SCTP_TCB_SEND_UNLOCK(stcb);
14204 /* ???? Huh ??? last msg is gone */
14206 panic("Warning: Last msg marked incomplete, yet nothing left?");
14208 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
14209 strm->last_msg_incomplete = 0;
14215 #if defined(__APPLE__) && !defined(__Userspace__)
14216 #if defined(APPLE_LEOPARD)
14217 while (uio->uio_resid > 0) {
14219 while (uio_resid(uio) > 0) {
14222 while (uio->uio_resid > 0) {
14224 /* How much room do we have? */
14225 struct mbuf *new_tail, *mm;
14227 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14228 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14229 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14233 if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
14234 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
14235 #if defined(__APPLE__) && !defined(__Userspace__)
14236 #if defined(APPLE_LEOPARD)
14237 (uio->uio_resid && (uio->uio_resid <= max_len))) {
14239 (uio_resid(uio) && (uio_resid(uio) <= max_len))) {
14242 (uio->uio_resid && (uio->uio_resid <= max_len))) {
14246 if (hold_tcblock) {
14247 SCTP_TCB_UNLOCK(stcb);
14250 #if defined(__APPLE__) && !defined(__Userspace__)
14251 SCTP_SOCKET_UNLOCK(so, 0);
14253 #if defined(__FreeBSD__) || defined(__Userspace__)
14254 mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
14256 mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail);
14258 #if defined(__APPLE__) && !defined(__Userspace__)
14259 SCTP_SOCKET_LOCK(so, 0);
14261 if ((mm == NULL) || error) {
14267 /* Update the mbuf and count */
14268 SCTP_TCB_SEND_LOCK(stcb);
14269 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14270 /* we need to get out.
14271 * Peer probably aborted.
14274 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
14275 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14276 error = ECONNRESET;
14278 SCTP_TCB_SEND_UNLOCK(stcb);
14281 if (sp->tail_mbuf) {
14282 /* tack it to the end */
14283 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
14284 sp->tail_mbuf = new_tail;
14286 /* A stolen mbuf */
14288 sp->tail_mbuf = new_tail;
14290 sctp_snd_sb_alloc(stcb, sndout);
14291 atomic_add_int(&sp->length, sndout);
14293 if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
14294 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
14297 /* Did we reach EOR? */
14298 #if defined(__APPLE__) && !defined(__Userspace__)
14299 #if defined(APPLE_LEOPARD)
14300 if ((uio->uio_resid == 0) &&
14302 if ((uio_resid(uio) == 0) &&
14305 if ((uio->uio_resid == 0) &&
14307 ((user_marks_eor == 0) ||
14308 (sinfo_flags & SCTP_EOF) ||
14309 (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
14310 sp->msg_is_complete = 1;
14312 sp->msg_is_complete = 0;
14314 SCTP_TCB_SEND_UNLOCK(stcb);
14316 #if defined(__APPLE__) && !defined(__Userspace__)
14317 #if defined(APPLE_LEOPARD)
14318 if (uio->uio_resid == 0) {
14320 if (uio_resid(uio) == 0) {
14323 if (uio->uio_resid == 0) {
14329 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
14330 /* This is ugly but we must assure locking order */
14331 if (hold_tcblock == 0) {
14332 SCTP_TCB_LOCK(stcb);
14335 sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
14336 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14337 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14338 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14344 SCTP_TCB_UNLOCK(stcb);
14347 /* wait for space now */
14348 if (non_blocking) {
14349 /* Non-blocking io in place out */
14352 /* What about the INIT, send it maybe */
14353 if (queue_only_for_init) {
14354 if (hold_tcblock == 0) {
14355 SCTP_TCB_LOCK(stcb);
14358 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14359 /* a collision took us forward? */
14362 #if defined(__FreeBSD__) && !defined(__Userspace__)
14363 NET_EPOCH_ENTER(et);
14365 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14366 #if defined(__FreeBSD__) && !defined(__Userspace__)
14367 NET_EPOCH_EXIT(et);
14369 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14373 if ((net->flight_size > net->cwnd) &&
14374 (asoc->sctp_cmt_on_off == 0)) {
14375 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14377 } else if (asoc->ifp_had_enobuf) {
14378 SCTP_STAT_INCR(sctps_ifnomemqueued);
14379 if (net->flight_size > (2 * net->mtu)) {
14382 asoc->ifp_had_enobuf = 0;
14384 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14385 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14386 (stcb->asoc.total_flight > 0) &&
14387 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14388 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14391 * Ok, Nagle is set on and we have data outstanding.
14392 * Don't send anything and let SACKs drive out the
14393 * data unless we have a "full" segment to send.
14395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14396 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14398 SCTP_STAT_INCR(sctps_naglequeued);
14401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14402 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14403 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14405 SCTP_STAT_INCR(sctps_naglesent);
14408 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14410 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14411 nagle_applies, un_sent);
14412 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14413 stcb->asoc.total_flight,
14414 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14416 if (queue_only_for_init)
14417 queue_only_for_init = 0;
14418 if ((queue_only == 0) && (nagle_applies == 0)) {
14420 * need to start chunk output
14421 * before blocking.. note that if
14422 * a lock is already applied, then
14423 * the input via the net is happening
14424 * and I don't need to start output :-D
14426 #if defined(__FreeBSD__) && !defined(__Userspace__)
14427 NET_EPOCH_ENTER(et);
14429 if (hold_tcblock == 0) {
14430 if (SCTP_TCB_TRYLOCK(stcb)) {
14432 sctp_chunk_output(inp,
14434 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14437 sctp_chunk_output(inp,
14439 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14441 #if defined(__FreeBSD__) && !defined(__Userspace__)
14442 NET_EPOCH_EXIT(et);
14445 if (hold_tcblock == 1) {
14446 SCTP_TCB_UNLOCK(stcb);
14449 SOCKBUF_LOCK(&so->so_snd);
14451 * This is a bit strange, but I think it will
14452 * work. The total_output_queue_size is locked and
14453 * protected by the TCB_LOCK, which we just released.
14454 * There is a race that can occur between releasing it
14455 * above, and me getting the socket lock, where sacks
14456 * come in but we have not put the SB_WAIT on the
14457 * so_snd buffer to get the wakeup. After the LOCK
14458 * is applied the sack_processing will also need to
14459 * LOCK the so->so_snd to do the actual sowwakeup(). So
14460 * once we have the socket buffer lock if we recheck the
14461 * size we KNOW we will get to sleep safely with the
14462 * wakeup flag in place.
14464 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14465 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
14466 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14468 #if defined(__APPLE__) && !defined(__Userspace__)
14469 #if defined(APPLE_LEOPARD)
14470 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14471 asoc, uio->uio_resid);
14473 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14474 asoc, uio_resid(uio));
14477 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14478 asoc, uio->uio_resid);
14482 #if !(defined(_WIN32) && !defined(__Userspace__))
14483 stcb->block_entry = &be;
14485 #if defined(__APPLE__) && !defined(__Userspace__)
14486 sbunlock(&so->so_snd, 1);
14488 error = sbwait(&so->so_snd);
14489 stcb->block_entry = NULL;
14491 if (error || so->so_error || be.error) {
14494 error = so->so_error;
14499 SOCKBUF_UNLOCK(&so->so_snd);
14503 #if defined(__APPLE__) && !defined(__Userspace__)
14504 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14506 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14507 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14508 asoc, stcb->asoc.total_output_queue_size);
14511 SOCKBUF_UNLOCK(&so->so_snd);
14512 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14516 SCTP_TCB_SEND_LOCK(stcb);
14517 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14518 SCTP_TCB_SEND_UNLOCK(stcb);
14522 if (sp->msg_is_complete == 0) {
14523 strm->last_msg_incomplete = 1;
14524 if (stcb->asoc.idata_supported == 0) {
14525 asoc->stream_locked = 1;
14526 asoc->stream_locked_on = srcv->sinfo_stream;
14529 sp->sender_all_done = 1;
14530 strm->last_msg_incomplete = 0;
14531 asoc->stream_locked = 0;
14534 SCTP_PRINTF("Huh no sp TSNH?\n");
14535 strm->last_msg_incomplete = 0;
14536 asoc->stream_locked = 0;
14538 SCTP_TCB_SEND_UNLOCK(stcb);
14539 #if defined(__APPLE__) && !defined(__Userspace__)
14540 #if defined(APPLE_LEOPARD)
14541 if (uio->uio_resid == 0) {
14543 if (uio_resid(uio) == 0) {
14546 if (uio->uio_resid == 0) {
14548 got_all_of_the_send = 1;
14551 /* We send in a 0, since we do NOT have any locks */
14552 error = sctp_msg_append(stcb, net, top, srcv, 0);
14554 if (sinfo_flags & SCTP_EOF) {
14555 got_all_of_the_send = 1;
14563 if ((sinfo_flags & SCTP_EOF) &&
14564 (got_all_of_the_send == 1)) {
14565 SCTP_STAT_INCR(sctps_sends_with_eof);
14567 if (hold_tcblock == 0) {
14568 SCTP_TCB_LOCK(stcb);
14571 if (TAILQ_EMPTY(&asoc->send_queue) &&
14572 TAILQ_EMPTY(&asoc->sent_queue) &&
14573 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
14574 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14577 /* there is nothing queued to send, so I'm done... */
14578 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14579 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14580 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14581 struct sctp_nets *netp;
14583 /* only send SHUTDOWN the first time through */
14584 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14585 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14587 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
14588 sctp_stop_timers_for_shutdown(stcb);
14589 if (stcb->asoc.alternate) {
14590 netp = stcb->asoc.alternate;
14592 netp = stcb->asoc.primary_destination;
14594 sctp_send_shutdown(stcb, netp);
14595 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14597 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14602 * we still got (or just got) data to send, so set
14606 * XXX sockets draft says that SCTP_EOF should be
14607 * sent with no data. currently, we will allow user
14608 * data to be sent first and move to
14611 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14612 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14613 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14614 if (hold_tcblock == 0) {
14615 SCTP_TCB_LOCK(stcb);
14618 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14619 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
14621 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
14622 if (TAILQ_EMPTY(&asoc->send_queue) &&
14623 TAILQ_EMPTY(&asoc->sent_queue) &&
14624 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14625 struct mbuf *op_err;
14626 char msg[SCTP_DIAG_INFO_LEN];
14629 if (free_cnt_applied) {
14630 atomic_add_int(&stcb->asoc.refcnt, -1);
14631 free_cnt_applied = 0;
14633 SCTP_SNPRINTF(msg, sizeof(msg),
14634 "%s:%d at %s", __FILE__, __LINE__, __func__);
14635 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
14637 #if defined(__FreeBSD__) && !defined(__Userspace__)
14638 NET_EPOCH_ENTER(et);
14640 sctp_abort_an_association(stcb->sctp_ep, stcb,
14641 op_err, SCTP_SO_LOCKED);
14642 #if defined(__FreeBSD__) && !defined(__Userspace__)
14643 NET_EPOCH_EXIT(et);
14645 /* now relock the stcb so everything is sane */
14650 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14652 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14657 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14658 some_on_control = 1;
14660 if (queue_only_for_init) {
14661 if (hold_tcblock == 0) {
14662 SCTP_TCB_LOCK(stcb);
14665 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14666 /* a collision took us forward? */
14669 #if defined(__FreeBSD__) && !defined(__Userspace__)
14670 NET_EPOCH_ENTER(et);
14672 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14673 #if defined(__FreeBSD__) && !defined(__Userspace__)
14674 NET_EPOCH_EXIT(et);
14676 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14680 if ((net->flight_size > net->cwnd) &&
14681 (stcb->asoc.sctp_cmt_on_off == 0)) {
14682 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14684 } else if (asoc->ifp_had_enobuf) {
14685 SCTP_STAT_INCR(sctps_ifnomemqueued);
14686 if (net->flight_size > (2 * net->mtu)) {
14689 asoc->ifp_had_enobuf = 0;
14691 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14692 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14693 (stcb->asoc.total_flight > 0) &&
14694 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14695 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14697 * Ok, Nagle is set on and we have data outstanding.
14698 * Don't send anything and let SACKs drive out the
14699 * data unless we have a "full" segment to send.
14701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14702 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14704 SCTP_STAT_INCR(sctps_naglequeued);
14707 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14708 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14709 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14711 SCTP_STAT_INCR(sctps_naglesent);
14714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14715 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14716 nagle_applies, un_sent);
14717 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14718 stcb->asoc.total_flight,
14719 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14721 #if defined(__FreeBSD__) && !defined(__Userspace__)
14722 NET_EPOCH_ENTER(et);
14724 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14725 /* we can attempt to send too. */
14726 if (hold_tcblock == 0) {
14727 /* If there is activity recv'ing sacks no need to send */
14728 if (SCTP_TCB_TRYLOCK(stcb)) {
14729 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14733 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14735 } else if ((queue_only == 0) &&
14736 (stcb->asoc.peers_rwnd == 0) &&
14737 (stcb->asoc.total_flight == 0)) {
14738 /* We get to have a probe outstanding */
14739 if (hold_tcblock == 0) {
14741 SCTP_TCB_LOCK(stcb);
14743 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14744 } else if (some_on_control) {
14745 int num_out, reason, frag_point;
14747 /* Here we do control only */
14748 if (hold_tcblock == 0) {
14750 SCTP_TCB_LOCK(stcb);
14752 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14753 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14754 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14756 #if defined(__FreeBSD__) && !defined(__Userspace__)
14757 NET_EPOCH_EXIT(et);
14759 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14760 queue_only, stcb->asoc.peers_rwnd, un_sent,
14761 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14762 stcb->asoc.total_output_queue_size, error);
14765 #if defined(__APPLE__) && !defined(__Userspace__)
14766 sbunlock(&so->so_snd, 1);
14770 if (local_soresv && stcb) {
14771 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14773 if (create_lock_applied) {
14774 SCTP_ASOC_CREATE_UNLOCK(inp);
14776 if ((stcb) && hold_tcblock) {
14777 SCTP_TCB_UNLOCK(stcb);
14779 if (stcb && free_cnt_applied) {
14780 atomic_add_int(&stcb->asoc.refcnt, -1);
14783 #if defined(__FreeBSD__) && !defined(__Userspace__)
14785 if (mtx_owned(&stcb->tcb_mtx)) {
14786 panic("Leaving with tcb mtx owned?");
14788 if (mtx_owned(&stcb->tcb_send_mtx)) {
14789 panic("Leaving with tcb send mtx owned?");
14798 sctp_m_freem(control);
14805 * generate an AUTHentication chunk, if required
14808 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
14809 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
14810 struct sctp_tcb *stcb, uint8_t chunk)
14812 struct mbuf *m_auth;
14813 struct sctp_auth_chunk *auth;
14817 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14821 if (stcb->asoc.auth_supported == 0) {
14824 /* does the requested chunk require auth? */
14825 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14828 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14829 if (m_auth == NULL) {
14833 /* reserve some space if this will be the first mbuf */
14835 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14836 /* fill in the AUTH chunk details */
14837 auth = mtod(m_auth, struct sctp_auth_chunk *);
14838 memset(auth, 0, sizeof(*auth));
14839 auth->ch.chunk_type = SCTP_AUTHENTICATION;
14840 auth->ch.chunk_flags = 0;
14841 chunk_len = sizeof(*auth) +
14842 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14843 auth->ch.chunk_length = htons(chunk_len);
14844 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14845 /* key id and hmac digest will be computed and filled in upon send */
14847 /* save the offset where the auth was inserted into the chain */
14849 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14850 *offset += SCTP_BUF_LEN(cn);
14853 /* update length and return pointer to the auth chunk */
14854 SCTP_BUF_LEN(m_auth) = chunk_len;
14855 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14856 if (auth_ret != NULL)
14862 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
14865 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14867 struct nd_prefix *pfx = NULL;
14868 struct nd_pfxrouter *pfxrtr = NULL;
14869 struct sockaddr_in6 gw6;
14871 #if defined(__FreeBSD__)
14872 if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
14874 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
14878 /* get prefix entry of address */
14879 #if defined(__FreeBSD__)
14882 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
14883 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
14885 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
14886 &src6->sin6_addr, &pfx->ndpr_mask))
14889 /* no prefix entry in the prefix list */
14891 #if defined(__FreeBSD__)
14894 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
14895 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14899 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
14900 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14902 /* search installed gateway from prefix entry */
14903 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
14904 memset(&gw6, 0, sizeof(struct sockaddr_in6));
14905 gw6.sin6_family = AF_INET6;
14906 #ifdef HAVE_SIN6_LEN
14907 gw6.sin6_len = sizeof(struct sockaddr_in6);
14909 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
14910 sizeof(struct in6_addr));
14911 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
14912 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
14913 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
14914 #if defined(__FreeBSD__)
14915 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14917 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14919 #if defined(__FreeBSD__)
14920 if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
14923 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
14925 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
14929 #if defined(__FreeBSD__)
14932 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
14938 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14941 struct sockaddr_in *sin, *mask;
14942 struct ifaddr *ifa;
14943 struct in_addr srcnetaddr, gwnetaddr;
14945 #if defined(__FreeBSD__)
14946 if (ro == NULL || ro->ro_nh == NULL ||
14948 if (ro == NULL || ro->ro_rt == NULL ||
14950 sifa->address.sa.sa_family != AF_INET) {
14953 ifa = (struct ifaddr *)sifa->ifa;
14954 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14955 sin = &sifa->address.sin;
14956 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14957 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14958 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14959 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14961 #if defined(__FreeBSD__)
14962 sin = &ro->ro_nh->gw4_sa;
14964 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14966 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14967 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14968 #if defined(__FreeBSD__)
14969 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14971 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14973 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14974 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
14980 #elif defined(__Userspace__)
14981 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
14983 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14988 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)