// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/siphash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>

/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.

	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.

	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. It means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive traffic
	to a higher priority band.

	We still need true WFQ for top-level CSZ, but using WFQ
	for best-effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits:
	- maximal queue length per flow to 127 packets.
	- max 65408 flows,
	- number of hash buckets to 65536.

	It is easy to increase these values, but not in flight.  */

#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

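/* Worked example: a maximal 64KB packet (qdisc_pkt_len() == 65535) would not
 * fit in the signed 16-bit 'allot'. Scaled, SFQ_ALLOT_SIZE(65535) =
 * DIV_ROUND_UP(65535, 8) = 8192, well below SHRT_MAX (32767), so per-round
 * credit arithmetic cannot overflow.
 */
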
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq RR chain */
	struct sfq_head	dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */

	unsigned int	backlog;
	struct red_vars	vars;
};

struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	siphash_key_t	perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot	*tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}

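/* Example: with SFQ_MAX_FLOWS == 65408, val == 65411 decodes to &q->dep[3],
 * the anchor of the list of flows currently holding exactly 3 packets.
 */
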
static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}

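/* q->divisor is validated to be a power of two (see sfq_change()), so the
 * mask above uniformly selects one of the 'divisor' hash buckets.
 */
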
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

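/* dep[] plus cur_depth give O(1) access to a longest flow: whenever the
 * qdisc is non-empty, q->dep[q->cur_depth].next is a slot holding cur_depth
 * packets, which is what sfq_drop() exploits when the queue overflows.
 */
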
/* helper functions : might be changed when/if skbs use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

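/* Note: the slot itself, cast to (struct sk_buff *), doubles as the list
 * sentinel: skblist_next/skblist_prev are the first two fields of struct
 * sfq_slot, matching the next/prev pointers at the start of struct sk_buff,
 * so an empty list simply points back at the slot.
 */
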
/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}

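/* Marking semantics: with TC_RED_ECN set, packets between the min and max
 * thresholds are ECN-marked with the configured probability
 * (sfq_prob_mark()); packets above the max threshold are also marked rather
 * than dropped unless TC_RED_HARDDROP is set as well (sfq_hard_mark()).
 */
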
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freezing
		 * old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch, to_free);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}

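/* Accounting note for the congestion-notification path above: one packet was
 * added and one dropped from this very flow, so the packet count seen by
 * ancestor qdiscs is unchanged (hence the 0), while their byte backlog
 * shrank by the dropped length minus the newly queued skb's length.
 */
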
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

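/* DRR example: with quantum = 1514 bytes, scaled_quantum =
 * SFQ_ALLOT_SIZE(1514) = 190. A slot sending 1514-byte packets pays 190
 * credits per packet and loses the round once its allot goes non-positive;
 * the next round tops it up by scaled_quantum again, so every active flow
 * averages one quantum of bytes per round regardless of its packet sizes.
 */
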
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}

static void sfq_perturbation(struct timer_list *t)
{
	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	siphash_key_t nkey;

	get_random_bytes(&nkey, sizeof(nkey));
	spin_lock(root_lock);
	q->perturbation = nkey;
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

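/* Note: rehashing here moves already-queued packets into the buckets their
 * flows now hash to, so a perturbation cannot reorder packets within a flow.
 * The rehash is skipped when classifier filters are attached, since bucket
 * selection then does not depend on the perturbed hash.
 */
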
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;
	struct sk_buff *to_free = NULL;
	struct sk_buff *tail = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	/* slot->allot is a short, make sure quantum is not too big. */
	if (ctl->quantum) {
		unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);

		if (scaled <= 0 || scaled > SHRT_MAX)
			return -EINVAL;
	}

	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
					ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit) {
		dropped += sfq_drop(sch, &to_free);
		if (!tail)
			tail = to_free;
	}

	rtnl_kfree_skbs(to_free, tail);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		get_random_bytes(&q->perturbation, sizeof(q->perturbation));
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}

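/* Bound-check example: quantum = 260096 scales to SFQ_ALLOT_SIZE(260096) =
 * 32512 < SHRT_MAX and is accepted, while quantum = 270000 scales to 33750
 * and is rejected, keeping slot->allot arithmetic within a signed short.
 */
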
static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	q->sch = sch;
	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	get_random_bytes(&q->perturbation, sizeof(q->perturbation));

	if (opt) {
		int err = sfq_change(sch, opt);

		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

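/* Typical userspace configuration (iproute2), for reference:
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * enables hash perturbation every 10 seconds; everything else keeps the
 * defaults initialized above.
 */
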
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");