1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3 * (C) Copyright Red Hat Inc. 2017
5 * This file is part of the SCTP kernel implementation
7 * These functions manipulate sctp stream queue/scheduling.
9 * Please send any bug reports or fixes you make to the
10 * email address(es):
11 * lksctp developers <linux-sctp@vger.kernel.org>
13 * Written or modified by:
14 * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
17 #include <linux/list.h>
18 #include <net/sctp/sctp.h>
19 #include <net/sctp/sm.h>
20 #include <net/sctp/stream_sched.h>
22 /* First Come First Serve (a.k.a. FIFO)
23 * RFC DRAFT ndata Section 3.1
/* FCFS has no per-stream tunable, so the ->set callback accepts and
 * discards the value.
 * NOTE(review): the function body is missing from this excerpt
 * (truncated extraction) -- in mainline it simply returns 0.
 */
25 static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
26 __u16 value, gfp_t gfp)
/* FCFS has no per-stream tunable; the ->get callback reports a fixed
 * value for every stream id.
 * NOTE(review): the second parameter line and the body are missing from
 * this excerpt (truncated extraction).
 */
31 static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
/* Per-association initialization hook; FCFS keeps no scheduler state,
 * so there is nothing to set up.
 * NOTE(review): body missing from this excerpt (truncated extraction).
 */
38 static int sctp_sched_fcfs_init(struct sctp_stream *stream)
/* Per-stream-id initialization hook; FCFS keeps no per-stream state.
 * NOTE(review): the gfp_t parameter line and the body are missing from
 * this excerpt (truncated extraction).
 */
43 static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
/* Per-stream-id teardown hook; FCFS allocates nothing per stream, so
 * this is a no-op stub.
 * NOTE(review): body missing from this excerpt (truncated extraction).
 */
49 static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
/* Enqueue hook: FCFS relies solely on the outq's out_chunk_list order
 * (chunks are already appended there by the core), so no per-scheduler
 * bookkeeping is needed here.
 * NOTE(review): body missing from this excerpt (truncated extraction).
 */
53 static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
54 struct sctp_datamsg *msg)
/* Pick the next chunk to transmit.
 *
 * If a datamsg is mid-flight on some stream (stream->out_curr set by
 * sctp_sched_dequeue_done()), keep draining that stream's per-stream
 * ext->outq so the message's fragments stay together; otherwise take
 * the head of the association-wide out_chunk_list (plain FIFO).
 *
 * NOTE(review): this excerpt is missing several lines of the body
 * (opening/closing braces, the else branch keyword, the early-out path
 * and final return of ch) -- the embedded original line numbers show
 * the gaps; compare against mainline net/sctp/stream_sched.c.
 */
58 static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
60 struct sctp_stream *stream = &q->asoc->stream;
61 struct sctp_chunk *ch = NULL;
62 struct list_head *entry;
/* Nothing queued at all: nothing to dequeue. */
64 if (list_empty(&q->out_chunk_list))
/* A message is partially sent: continue on its stream's own queue. */
67 if (stream->out_curr) {
68 ch = list_entry(stream->out_curr->ext->outq.next,
69 struct sctp_chunk, stream_list);
/* Otherwise: strict arrival order from the global list. */
71 entry = q->out_chunk_list.next;
72 ch = list_entry(entry, struct sctp_chunk, list);
/* Unlink the chosen chunk from both lists and adjust accounting. */
75 sctp_sched_dequeue_common(q, ch);
/* Post-dequeue hook; FCFS has no per-scheduler state to update once a
 * chunk has been handed to the transport, so this is a no-op stub.
 * NOTE(review): body missing from this excerpt (truncated extraction).
 */
81 static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
82 struct sctp_chunk *chunk)
/* (Re)schedule every stream; a no-op for FCFS, which never marks
 * streams as scheduled/unscheduled.
 * NOTE(review): body missing from this excerpt (truncated extraction).
 */
86 static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
/* Unschedule every stream; a no-op for FCFS (see sched_all above).
 * NOTE(review): body missing from this excerpt (truncated extraction).
 */
90 static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
/* Ops table tying the FCFS callbacks together; registered below under
 * SCTP_SS_FCFS and used as the always-safe fallback scheduler in
 * sctp_sched_set_sched().
 * NOTE(review): the closing "};" of this initializer is missing from
 * this excerpt (truncated extraction).
 */
94 static struct sctp_sched_ops sctp_sched_fcfs = {
95 .set = sctp_sched_fcfs_set,
96 .get = sctp_sched_fcfs_get,
97 .init = sctp_sched_fcfs_init,
98 .init_sid = sctp_sched_fcfs_init_sid,
99 .free_sid = sctp_sched_fcfs_free_sid,
100 .enqueue = sctp_sched_fcfs_enqueue,
101 .dequeue = sctp_sched_fcfs_dequeue,
102 .dequeue_done = sctp_sched_fcfs_dequeue_done,
103 .sched_all = sctp_sched_fcfs_sched_all,
104 .unsched_all = sctp_sched_fcfs_unsched_all,
/* Register the FCFS ops table in the global scheduler registry under
 * the SCTP_SS_FCFS slot. Called once from sctp_sched_ops_init().
 */
107 static void sctp_sched_ops_fcfs_init(void)
109 sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
112 /* API to other parts of the stack */
/* Registry of available schedulers, indexed by enum sctp_sched_type
 * (one slot per type, 0..SCTP_SS_MAX inclusive). Populated at boot by
 * sctp_sched_ops_init() via sctp_sched_ops_register().
 */
114 static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
/* Install @sched_ops into the registry slot for scheduler type @sched.
 * No locking: only called during one-time initialization
 * (sctp_sched_ops_init), before the registry is read.
 */
116 void sctp_sched_ops_register(enum sctp_sched_type sched,
117 struct sctp_sched_ops *sched_ops)
119 sctp_sched_ops[sched] = sched_ops;
/* One-time registration of every built-in scheduler (FCFS, priority,
 * round-robin, fair-capacity, weighted-fair-queueing). The non-FCFS
 * *_init() helpers live in their respective stream_sched_*.c files.
 */
122 void sctp_sched_ops_init(void)
124 sctp_sched_ops_fcfs_init();
125 sctp_sched_ops_prio_init();
126 sctp_sched_ops_rr_init();
127 sctp_sched_ops_fc_init();
128 sctp_sched_ops_wfq_init();
/* Tear down the association's current scheduler: unschedule all
 * streams, release each stream's per-scheduler state, and zero the
 * scheduler-private tail of every stream ext so the next scheduler
 * starts from a clean slate (memset_after keeps the common outq list
 * head intact).
 * NOTE(review): this excerpt is missing lines of the body (braces, the
 * loop-variable declaration, and a likely NULL check on soute before
 * the memset) -- compare against mainline net/sctp/stream_sched.c.
 */
131 static void sctp_sched_free_sched(struct sctp_stream *stream)
133 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
134 struct sctp_stream_out_ext *soute;
137 sched->unsched_all(stream);
138 for (i = 0; i < stream->outcnt; i++) {
139 soute = SCTP_SO(stream, i)->ext;
142 sched->free_sid(stream, i);
143 /* Give the next scheduler a clean slate. */
144 memset_after(soute, 0, outq);
/* Switch the association's stream scheduler to @sched.
 *
 * Validates the requested type against the registry, tears down the
 * old scheduler's state, installs the new ops, (re)initializes every
 * stream that already has an ext, and re-enqueues all chunks currently
 * sitting in the outqueue so the new scheduler sees them. On a
 * mid-switch failure the error path frees the partially-initialized
 * state and falls back to FCFS, which needs no per-stream state and is
 * therefore always safe.
 *
 * NOTE(review): this excerpt is missing lines (variable declarations
 * for i/ret, braces, the "same scheduler" early return, error checks
 * after init_sid, and the msg-grouping logic inside the requeue loop)
 * -- compare against mainline net/sctp/stream_sched.c.
 */
148 int sctp_sched_set_sched(struct sctp_association *asoc,
149 enum sctp_sched_type sched)
151 struct sctp_sched_ops *old = asoc->outqueue.sched;
152 struct sctp_datamsg *msg = NULL;
153 struct sctp_sched_ops *n;
154 struct sctp_chunk *ch;
/* Reject scheduler ids outside the registry. */
157 if (sched > SCTP_SS_MAX)
160 n = sctp_sched_ops[sched];
/* Drop the old scheduler's per-stream state before switching. */
165 sctp_sched_free_sched(&asoc->stream);
167 asoc->outqueue.sched = n;
168 n->init(&asoc->stream);
169 for (i = 0; i < asoc->stream.outcnt; i++) {
/* Streams without an ext are initialized lazily on first use. */
170 if (!SCTP_SO(&asoc->stream, i)->ext)
173 ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
178 /* We have to requeue all chunks already queued. */
179 list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
183 n->enqueue(&asoc->outqueue, msg);
/* Error path: discard the half-built state and fall back. */
189 sctp_sched_free_sched(&asoc->stream);
190 asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */
/* Reverse-lookup: return the enum sctp_sched_type whose registry entry
 * matches the association's current scheduler ops pointer.
 * NOTE(review): the loop-variable declaration, return statements and
 * braces are missing from this excerpt (truncated extraction).
 */
195 int sctp_sched_get_sched(struct sctp_association *asoc)
199 for (i = 0; i <= SCTP_SS_MAX; i++)
200 if (asoc->outqueue.sched == sctp_sched_ops[i])
/* Set the scheduler-specific value (e.g. priority or weight) for
 * stream @sid, lazily allocating the stream's ext state on first use
 * via sctp_stream_init_ext(), then delegating to the active
 * scheduler's ->set callback.
 * NOTE(review): the ret declaration, error returns and braces are
 * missing from this excerpt (truncated extraction).
 */
206 int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
207 __u16 value, gfp_t gfp)
/* Reject out-of-range stream ids. */
209 if (sid >= asoc->stream.outcnt)
212 if (!SCTP_SO(&asoc->stream, sid)->ext) {
215 ret = sctp_stream_init_ext(&asoc->stream, sid);
220 return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
/* Read the scheduler-specific value for stream @sid through the active
 * scheduler's ->get callback. Unlike the setter, this does not
 * allocate ext state: a stream without ext is treated as an error.
 * NOTE(review): the value out-parameter line, error returns and braces
 * are missing from this excerpt (truncated extraction).
 */
223 int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
226 if (sid >= asoc->stream.outcnt)
229 if (!SCTP_SO(&asoc->stream, sid)->ext)
232 return asoc->outqueue.sched->get(&asoc->stream, sid, value);
/* Bookkeeping after a chunk has been dequeued for transmission.
 *
 * If @ch is not the last fragment of its datamsg and the peer cannot
 * interleave user messages (no I-DATA support), pin the chunk's stream
 * as out_curr so subsequent dequeues keep draining the same message
 * instead of letting another stream preempt it mid-message. Once the
 * message completes (or interleaving is allowed), out_curr is cleared.
 * Finally the active scheduler's ->dequeue_done hook is invoked.
 *
 * NOTE(review): braces, the sid declaration and the else keyword are
 * missing from this excerpt (truncated extraction).
 */
235 void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
237 if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
238 !q->asoc->peer.intl_capable) {
239 struct sctp_stream_out *sout;
242 /* datamsg is not finish, so save it as current one,
243 * in case application switch scheduler or a higher
244 * priority stream comes in.
246 sid = sctp_chunk_stream_no(ch);
247 sout = SCTP_SO(&q->asoc->stream, sid);
248 q->asoc->stream.out_curr = sout;
252 q->asoc->stream.out_curr = NULL;
253 q->sched->dequeue_done(q, ch);
256 /* Auxiliary functions for the schedulers */
/* Unlink @ch from both the association-wide out_chunk_list (->list)
 * and its stream's per-scheduler queue (->stream_list), and subtract
 * its skb length from the outqueue byte count. Shared by every
 * scheduler's dequeue path.
 * NOTE(review): braces are missing from this excerpt (truncated
 * extraction).
 */
258 void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
259 list_del_init(&ch->list);
260 list_del_init(&ch->stream_list);
261 q->out_qlen -= ch->skb->len;
/* Common per-stream-id initialization: reset the stream ext's outq
 * list head, then let the active scheduler set up its own per-stream
 * state via ->init_sid.
 * NOTE(review): braces are missing from this excerpt (truncated
 * extraction).
 */
264 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
266 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
267 struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;
269 INIT_LIST_HEAD(&ext->outq);
270 return sched->init_sid(stream, sid, gfp);
/* Resolve the scheduler ops for a stream: @stream is embedded in its
 * sctp_association, so recover the association with container_of and
 * return its outqueue's active scheduler.
 * NOTE(review): braces are missing from this excerpt (truncated
 * extraction); the function's closing line falls outside this view.
 */
273 struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
275 struct sctp_association *asoc;
277 asoc = container_of(stream, struct sctp_association, stream);
279 return asoc->outqueue.sched;