// SPDX-License-Identifier: GPL-2.0-only
/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
EXPORT_SYMBOL_GPL(dccp_statistics);

DEFINE_PER_CPU(unsigned int, dccp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);
/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
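
/*
 * Illustrative note (an addition, not part of the original source): this
 * default can be overridden per socket via DCCP_SOCKOPT_QPOLICY_TXQLEN
 * below, or system-wide through the DCCP sysctl tree, assuming sysctl.c
 * registers it under net/dccp/default as usual:
 *
 *	sysctl -w net.dccp.default.tx_qlen=20
 */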
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		fallthrough;
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
void dccp_destruct_common(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
}

EXPORT_SYMBOL_GPL(dccp_destruct_common);

static void dccp_sk_destruct(struct sock *sk)
{
	dccp_destruct_common(sk);
	inet_sock_destruct(sk);
}
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	inet_bhash2_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk_error_report(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
__poll_t dccp_poll(struct file *file, struct socket *sock,
		   poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = EPOLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);
		/* Using sk_wmem_alloc here because sk_wmem_queued is not used
		 * by DCCP and is always 0, as with UDP.
		 */
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
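
/*
 * Usage sketch (an addition, not part of the original source): from user
 * space the two commands above are reached through the generic ioctl(2)
 * queue queries:
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);	// payload bytes of the next packet
 *	ioctl(fd, SIOCOUTQ, &outq);	// bytes still held for transmission
 *
 * Note that SIOCINQ reports only the packet at the head of the receive
 * queue, matching the one-datagram-per-read semantics of dccp_recvmsg().
 */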
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_sockptr_offset(sl->dccpsl_list, optval,
				sizeof(service), optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
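
/*
 * Usage sketch (an addition, not part of the original source): a service
 * code must be configured before the socket becomes active. A single
 * __be32 sets one code; a longer buffer, laid out as parsed above,
 * registers an additional list of codes:
 *
 *	__be32 service = htonl(42);
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 */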
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
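
/*
 * Worked example (an addition, not part of the original source): for
 * cscov = 13 the code above computes len = 16 - 13 = 3 and builds the
 * list { 13, 14, 15 }, i.e. every acceptable coverage value from 13 up
 * to 15, lowest first, so negotiation settles on the smallest value
 * both endpoints permit.
 */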
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				sockptr_t optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_sockptr(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		    unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);
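
/*
 * Usage sketch (an addition, not part of the original source): CCIDs are
 * selected with a byte array listing acceptable IDs in order of preference,
 * which dccp_setsockopt_ccid() above feeds into feature negotiation:
 *
 *	__u8 ccids[] = { 2, 3 };	// prefer CCID-2, accept CCID-3
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, ccids, sizeof(ccids));
 *
 * DCCP_SOCKOPT_TX_CCID / DCCP_SOCKOPT_RX_CCID restrict the choice to one
 * half-connection instead of both.
 */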
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
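
/*
 * Usage sketch (an addition, not part of the original source): to attach a
 * queuing priority, the socket first selects the priority-based qpolicy,
 * then passes DCCP_SCM_PRIORITY as ancillary data on each send:
 *
 *	int policy = DCCPQ_POLICY_PRIO;
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct iovec iov = { .iov_base = data, .iov_len = data_len };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
 *		   &policy, sizeof(policy));
 *	cmsg->cmsg_level = SOL_DCCP;
 *	cmsg->cmsg_type	 = DCCP_SCM_PRIORITY;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cmsg) = 1;	// opaque priority value
 *	sendmsg(fd, &msg, 0);
 *
 * Without the qpolicy selection, dccp_qpolicy_param_ok() rejects the cmsg.
 */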
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	trace_dccp_probe(sk, len);

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_discard;
	}

	if (sk->sk_state == DCCP_CLOSED) {
		rc = -ENOTCONN;
		goto out_discard;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		 int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			fallthrough;
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo, NULL);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
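
/*
 * Usage sketch (an addition, not part of the original source): reads have
 * datagram semantics. Each call returns at most one packet's payload, and
 * a short read discards the remainder, reporting MSG_TRUNC via recvmsg():
 *
 *	char buf[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *
 *	if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
 *		;	// packet was larger than buf; the tail was dropped
 *
 * MSG_PEEK leaves the packet on the queue for a subsequent read.
 */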
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		struct dccp_sock *dp = dccp_sk(sk);

		dp->dccps_role = DCCP_ROLE_LISTEN;

		/* do not start to listen if feature negotiation setup fails */
		if (dccp_feat_finalise_settings(dp)) {
			err = -EPROTO;
			goto out;
		}

		err = inet_csk_listen_start(sk);
		if (err)
			goto out;
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
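
/*
 * Usage sketch (an addition, not part of the original source): a minimal
 * passive DCCP endpoint, reaching inet_dccp_listen() via listen(2). The
 * service code is configured before the socket starts listening:
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);
 *	struct sockaddr_in sa = { .sin_family = AF_INET,
 *				  .sin_port   = htons(5001) };
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(fd, 5);
 */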
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		fallthrough;
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		fallthrough;
	default:
		dccp_set_state(sk, next_state);
	}
}
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If socket has been already reset kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	this_cpu_inc(dccp_orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
	unsigned long goal;
	unsigned long nr_pages = totalram_pages();
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_hashinfo2;
	dccp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("dccp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!dccp_hashinfo.bind2_bucket_cachep)
		goto out_free_bind_bucket_cachep;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (21 - PAGE_SHIFT);
	else
		goal = nr_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);
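
	/*
	 * Worked example (an addition, not part of the original source):
	 * with 4 KiB pages (PAGE_SHIFT = 12) and 4 GiB of RAM, nr_pages is
	 * 1048576 >= 128 * 1024, so goal = 1048576 >> 9 = 2048 pages and
	 * the for-loop above settles on ehash_order = 11. Assuming an
	 * 8-byte inet_ehash_bucket, that yields (1 << 11) * 4096 / 8 = 2^20
	 * buckets, i.e. ehash_mask = 0xfffff.
	 */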
	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind2_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	dccp_hashinfo.bhash2 = (struct inet_bind_hashbucket *)
		__get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order);

	if (!dccp_hashinfo.bhash2) {
		DCCP_CRIT("Failed to allocate DCCP bind2 hash table");
		goto out_free_dccp_bhash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
		spin_lock_init(&dccp_hashinfo.bhash2[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain);
	}

	dccp_hashinfo.pernet = false;

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash2;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash2:
	free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind2_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
	inet_hashinfo2_free_mod(&dccp_hashinfo);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.bhash2 = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	dccp_hashinfo.bind2_bucket_cachep = NULL;
	return rc;
}
static void __exit dccp_fini(void)
{
	int bhash_order = get_order(dccp_hashinfo.bhash_size *
				    sizeof(struct inet_bind_hashbucket));

	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	inet_hashinfo2_free_mod(&dccp_hashinfo);
}
module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");