sctp: add weighted fair queueing stream scheduler
author: Xin Long <lucien.xin@gmail.com>
Tue, 7 Mar 2023 21:23:27 +0000 (16:23 -0500)
committer: Paolo Abeni <pabeni@redhat.com>
Thu, 9 Mar 2023 10:31:44 +0000 (11:31 +0100)
As it says in rfc8260#section-3.6 about the weighted fair queueing
scheduler:

   A Weighted Fair Queueing scheduler between the streams is used.  The
   weight is configurable per outgoing SCTP stream.  This scheduler
   considers the lengths of the messages of each stream and schedules
   them in a specific way to use the capacity according to the given
   weights.  If the weight of stream S1 is n times the weight of stream
   S2, the scheduler should assign to stream S1 n times the capacity it
   assigns to stream S2.  The details are implementation dependent.
   Interleaving user messages allows for a better realization of the
   capacity usage according to the given weights.

This patch adds Weighted Fair Queueing Scheduler actually based on
the code of Fair Capacity Scheduler by adding fc_weight into struct
sctp_stream_out_ext and taking it into account when sorting stream->
fc_list in sctp_sched_fc_sched() and sctp_sched_fc_dequeue_done().

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/net/sctp/stream_sched.h
include/net/sctp/structs.h
include/uapi/linux/sctp.h
net/sctp/stream_sched.c
net/sctp/stream_sched_fc.c

index 913170710adb67169f34b1f8c7d33a1caa1657ce..572d73fdcd5eaf7f24e3c13bd52c69e2ec9d6f9e 100644 (file)
@@ -59,5 +59,6 @@ void sctp_sched_ops_register(enum sctp_sched_type sched,
 void sctp_sched_ops_prio_init(void);
 void sctp_sched_ops_rr_init(void);
 void sctp_sched_ops_fc_init(void);
+void sctp_sched_ops_wfq_init(void);
 
 #endif /* __sctp_stream_sched_h__ */
index 2f1c9f50b35232a941609d6390b6ffe93951fa4d..a0933efd93c3e20933f22a60cc19b1a3b8ecab39 100644 (file)
@@ -1432,6 +1432,7 @@ struct sctp_stream_out_ext {
                struct {
                        struct list_head fc_list;
                        __u32 fc_length;
+                       __u16 fc_weight;
                };
        };
 };
index 6814c5a1c4bcf7fe2eb4261a52db21cfc38464eb..b7d91d4cf0db5a171dcc4a6abed48a21c04b1c66 100644 (file)
@@ -1212,7 +1212,8 @@ enum sctp_sched_type {
        SCTP_SS_PRIO,
        SCTP_SS_RR,
        SCTP_SS_FC,
-       SCTP_SS_MAX = SCTP_SS_FC
+       SCTP_SS_WFQ,
+       SCTP_SS_MAX = SCTP_SS_WFQ
 };
 
 /* Probe Interval socket option */
index 1ebd14ef8daac7c1f1f97e13fa4f31b64ba725aa..e843760e9aaad3ec0383cdaf72c6c4b1507e9f04 100644 (file)
@@ -125,6 +125,7 @@ void sctp_sched_ops_init(void)
        sctp_sched_ops_prio_init();
        sctp_sched_ops_rr_init();
        sctp_sched_ops_fc_init();
+       sctp_sched_ops_wfq_init();
 }
 
 static void sctp_sched_free_sched(struct sctp_stream *stream)
index b336c2f5486b3c408df95ca8a90d456c9c3cf77f..4bd18a497a6dc60b2f34e0171d63d6048ce63a82 100644 (file)
 #include <net/sctp/sm.h>
 #include <net/sctp/stream_sched.h>
 
-/* Fair Capacity handling
- * RFC 8260 section 3.5
+/* Fair Capacity and Weighted Fair Queueing handling
+ * RFC 8260 section 3.5 and 3.6
  */
 static void sctp_sched_fc_unsched_all(struct sctp_stream *stream);
 
+static int sctp_sched_wfq_set(struct sctp_stream *stream, __u16 sid,
+                             __u16 weight, gfp_t gfp)
+{
+       struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext;
+
+       if (!weight)
+               return -EINVAL;
+
+       soute->fc_weight = weight;
+       return 0;
+}
+
+static int sctp_sched_wfq_get(struct sctp_stream *stream, __u16 sid,
+                             __u16 *value)
+{
+       struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext;
+
+       *value = soute->fc_weight;
+       return 0;
+}
+
 static int sctp_sched_fc_set(struct sctp_stream *stream, __u16 sid,
                             __u16 weight, gfp_t gfp)
 {
@@ -50,6 +71,7 @@ static int sctp_sched_fc_init_sid(struct sctp_stream *stream, __u16 sid,
 
        INIT_LIST_HEAD(&soute->fc_list);
        soute->fc_length = 0;
+       soute->fc_weight = 1;
 
        return 0;
 }
@@ -67,7 +89,8 @@ static void sctp_sched_fc_sched(struct sctp_stream *stream,
                return;
 
        list_for_each_entry(pos, &stream->fc_list, fc_list)
-               if (pos->fc_length >= soute->fc_length)
+               if ((__u64)pos->fc_length * soute->fc_weight >=
+                   (__u64)soute->fc_length * pos->fc_weight)
                        break;
        list_add_tail(&soute->fc_list, &pos->fc_list);
 }
@@ -137,7 +160,8 @@ static void sctp_sched_fc_dequeue_done(struct sctp_outq *q,
 
        pos = soute;
        list_for_each_entry_continue(pos, &stream->fc_list, fc_list)
-               if (pos->fc_length >= soute->fc_length)
+               if ((__u64)pos->fc_length * soute->fc_weight >=
+                   (__u64)soute->fc_length * pos->fc_weight)
                        break;
        list_move_tail(&soute->fc_list, &pos->fc_list);
 }
@@ -181,3 +205,21 @@ void sctp_sched_ops_fc_init(void)
 {
        sctp_sched_ops_register(SCTP_SS_FC, &sctp_sched_fc);
 }
+
+static struct sctp_sched_ops sctp_sched_wfq = {
+       .set = sctp_sched_wfq_set,
+       .get = sctp_sched_wfq_get,
+       .init = sctp_sched_fc_init,
+       .init_sid = sctp_sched_fc_init_sid,
+       .free_sid = sctp_sched_fc_free_sid,
+       .enqueue = sctp_sched_fc_enqueue,
+       .dequeue = sctp_sched_fc_dequeue,
+       .dequeue_done = sctp_sched_fc_dequeue_done,
+       .sched_all = sctp_sched_fc_sched_all,
+       .unsched_all = sctp_sched_fc_unsched_all,
+};
+
+void sctp_sched_ops_wfq_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_WFQ, &sctp_sched_wfq);
+}