// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->reasm_uo);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode = 0;

        return ulpq;
}
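
/* Typical lifecycle (sketch): the association code owns one ULP queue per
 * association, e.g.
 *
 *      sctp_ulpq_init(&asoc->ulpq, asoc);
 *      ...
 *      sctp_ulpq_free(&asoc->ulpq);
 *
 * set up when the association is created and torn down when it is freed.
 * Illustrative only; see the association setup/teardown code for the real
 * call sites.
 */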

/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
        int event_eor = 0;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        event->ssn = ntohs(chunk->subh.data_hdr->ssn);
        event->ppid = chunk->subh.data_hdr->ppid;

        /* Do reassembly if needed. */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed. */
        if (event) {
                /* Create a temporary list to collect chunks on. */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                if (event->msg_flags & MSG_EOR)
                        event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, &temp);
        }

        return event_eor;
}
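
/* Return value summary for sctp_ulpq_tail_data(): -ENOMEM if no event could
 * be allocated, 1 if a complete message (MSG_EOR set) reached the ULP, and 0
 * otherwise.  sctp_ulpq_renege() below relies on this: a return <= 0 makes it
 * try partial delivery, while 1 lets it drain the reassembly queue.
 */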

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot.
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        skb_queue_splice_tail_init(&sp->pd_lobby,
                                                   &sk->sk_receive_queue);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
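
/* Partial delivery bookkeeping note: sp->pd_mode is a per-socket counter of
 * associations currently doing partial delivery (see sctp_ulpq_set_pd() and
 * sctp_clear_pd() above), while ulpq->pd_mode flags whether _this_
 * association is the one in partial delivery.
 */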

/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_ulpevent *event;
        struct sk_buff_head *queue;
        struct sk_buff *skb;
        int clear_pd = 0;

        skb = __skb_peek(skb_list);
        event = sctp_skb2event(skb);

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sk->sk_shutdown & RCV_SHUTDOWN &&
            (sk->sk_shutdown & SEND_SHUTDOWN ||
             !sctp_ulpevent_is_notification(event)))
                goto out_free;

        if (!sctp_ulpevent_is_notification(event)) {
                sk_mark_napi_id(sk, skb);
                sk_incoming_cpu_update(sk);
        }
        /* Check if the user wishes to receive this event. */
        if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_ is
         * the association that caused the partial delivery.
         */
        if (atomic_read(&sp->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                             (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sp->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /* If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sp->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sp->pd_lobby;
                }
        }

        skb_queue_splice_tail_init(skb_list, queue);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
                if (!sock_owned_by_user(sk))
                        sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
        }
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}
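
/* Delivery destination summary: with no partial delivery in progress,
 * everything goes straight to sk_receive_queue.  While this association is
 * in PD, the remaining fragments of the partially delivered message still go
 * to the receive queue (MSG_EOR there ends PD), whereas notifications and
 * unfragmented messages wait in sp->pd_lobby.  Events for other associations
 * also wait in the lobby unless fragment interleave is enabled.
 */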

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                  struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by TSN. */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the
 * sctp payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
                                                  struct sk_buff_head *queue,
                                                  struct sk_buff *f_frag,
                                                  struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragments
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);
                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue. */
        __skb_unlink(f_frag, queue);

        /* If we did unshare, then free the old skb and re-assign. */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue. */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment. */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

        return event;
}
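
/* The skb returned above carries the whole user message: the remaining
 * fragments hang off the first fragment's frag_list and its len/data_len
 * cover the full message.  MSG_EOR is set by the callers, not here, since a
 * partially delivered prefix must not look complete to the ULP.
 */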

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets the
         * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
         * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible partial delivery.
                         */
                        if (skb_queue_is_first(&ulpq->reasm, pos)) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (first_frag && ctsn == next_tsn) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && ctsn == next_tsn)
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(asoc->base.net,
                                                             &ulpq->reasm,
                                                             pd_first, pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
                                             &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}
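
/* Note: the 'found' path above delivers a complete message and marks it with
 * MSG_EOR.  The pd_point path delivers only a leading run of fragments once
 * it reaches the user's SCTP_PARTIAL_DELIVERY_POINT, leaves MSG_EOR clear,
 * and flips the socket/association into partial delivery mode.
 */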

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag)
                                return NULL;
                        goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
                                             first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message. */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                return NULL;
                        else
                                goto done;
                        break;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
                                             first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point.
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                }
        }
}
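
/* The notification mentioned in the RFC text above is the
 * SCTP_PARTIAL_DELIVERY_ABORTED event; see sctp_ulpq_abort_pd() at the end
 * of this file, which is where that event is built and queued.
 */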

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                struct sk_buff_head temp;

                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                /* Do ordering if needed. */
                if (event->msg_flags & MSG_EOR)
                        event = sctp_ulpq_order(ulpq, event);

                /* Send event to the ULP.  'event' is the
                 * sctp_ulpevent for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, &temp);
        }
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                       struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *stream;
        __u16 sid, csid, cssn;

        sid = event->stream;
        stream = &ulpq->asoc->stream;

        /* The caller queued the event's skb at the head of a temporary
         * sk_buff_head, so its ->prev pointer still points at that list
         * head; the cast below recovers the list so that newly ordered
         * chunks can be appended for delivery in one go.
         */
        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN. */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far? */
                if (csid > sid)
                        break;

                /* Have we not gone far enough? */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(stream, in, sid))
                        break;

                /* Found it, so mark in the stream. */
                sctp_ssn_next(stream, in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event. */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                    struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *stream;

        /* Check if this message needs ordering. */
        if (event->msg_flags & SCTP_DATA_UNORDERED)
                return event;

        /* Note: The stream ID must be verified before this routine. */
        sid = event->stream;
        ssn = event->ssn;
        stream = &ulpq->asoc->stream;

        /* Is this the expected SSN for this stream ID? */
        if (ssn != sctp_ssn_peek(stream, in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found. */
        sctp_ssn_next(stream, in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}
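
/* Ordered delivery in short: a message whose SSN is not the next expected
 * one for its stream parks in the lobby (sctp_ulpq_store_ordered); when the
 * expected SSN finally arrives, sctp_ssn_next() advances the stream and
 * sctp_ulpq_retrieve_ordered() drags any now-deliverable successors onto the
 * same delivery list.
 */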

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *stream;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        stream = &ulpq->asoc->stream;

        /* We are holding the chunks by stream, by SSN. */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far? */
                if (csid > sid)
                        break;

                /* Have we not gone far enough? */
                if (csid < sid)
                        continue;

                /* See if this ssn has been marked by skipping. */
                if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Create a temporary list to collect chunks on. */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event. */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
                        sctp_ssn_next(stream, in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                /* See if we have more ordered that we can deliver. */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, &temp);
        }
}

/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *stream;

        /* Note: The stream ID must be verified before this routine. */
        stream = &ulpq->asoc->stream;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(stream, in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}
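
/* Rough caller's view (sketch): when a FORWARD TSN chunk is processed, the
 * state machine side-effect code is expected to walk the chunk's skip list
 * and call sctp_ulpq_skip() once per (stream, ssn) pair, and stale fragments
 * are flushed with sctp_ulpq_reasm_flushtsn().  Illustrative only; the exact
 * call sites live outside this file.
 */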

__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
                            __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn, last_tsn;
        struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                /* Don't renege below the Cumulative TSN ACK Point. */
                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                        break;

                /* Events in ordering queue may have multiple fragments
                 * corresponding to additional TSNs.  Sum the total
                 * freed space; find the last TSN.
                 */
                freed += skb_headlen(skb);
                flist = skb_shinfo(skb)->frag_list;
                for (last = flist; flist; flist = flist->next) {
                        last = flist;
                        freed += skb_headlen(last);
                }
                if (last)
                        last_tsn = sctp_skb2event(last)->tsn;
                else
                        last_tsn = tsn;

                /* Unlink the event, then renege all applicable TSNs. */
                __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
                while (TSN_lte(tsn, last_tsn)) {
                        sctp_tsnmap_renege(tsnmap, tsn);
                        tsn++;
                }
                if (freed >= needed)
                        goto done;
        }

done:
        return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
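
/* Reneging gives back receive buffer space by dropping data that was
 * received but not yet delivered: newest TSNs first, never at or below the
 * Cumulative TSN ACK Point, with the matching TSNs marked as missing again
 * via sctp_tsnmap_renege() so the peer will retransmit them.
 */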

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
        __u32 ctsn;
        struct sk_buff *skb;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* Data must be at or below the Cumulative TSN ACK Point to
         * start partial delivery.
         */
        skb = skb_peek(&asoc->ulpq.reasm);
        if (skb != NULL) {
                ctsn = sctp_skb2event(skb)->tsn;
                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
                        return;
        }

        /* If the user enabled the fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not already in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible? */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP. */
                if (event) {
                        struct sk_buff_head temp;

                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));
                        sctp_ulpq_tail_event(ulpq, &temp);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc = ulpq->asoc;
        __u32 freed = 0;
        __u16 needed;

        needed = ntohs(chunk->chunk_hdr->length) -
                 sizeof(struct sctp_data_chunk);

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }

        /* If able to free enough room, accept this chunk. */
        if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
            freed >= needed) {
                int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);

                /* Enter partial delivery if the chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
                 */
                if (retval <= 0)
                        sctp_ulpq_partial_delivery(ulpq, gfp);
                else if (retval == 1)
                        sctp_ulpq_reasm_drain(ulpq);
        }

        sk_mem_reclaim(asoc->base.sk);
}
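
/* Sizing note: 'needed' is the payload size of the incoming DATA chunk
 * (chunk length minus the data chunk header), so reneging stops as soon as
 * at least that many bytes have been reclaimed from the ordering and
 * reassembly queues.
 */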

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sctp_sock *sp;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        sp = sctp_sk(sk);
        if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
                                       SCTP_PARTIAL_DELIVERY_EVENT))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              0, 0, 0, gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
                sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
        }
}