/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
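
/* multiq maps each band to one hardware transmit queue of a multiqueue
 * device and services the bands round-robin, skipping any band whose
 * underlying hardware queue is stopped.  A typical setup, assuming a
 * multiqueue NIC named eth0, looks like:
 *
 *	tc qdisc add dev eth0 root handle 1: multiq
 */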
struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	struct Qdisc **queues;
};
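
/* Pick the band for an skb.  Attached tc filters run for their actions
 * (e.g. skbedit may rewrite the queue mapping), but the band itself
 * always comes from the skb's queue mapping; out-of-range mappings fall
 * back to band 0.
 */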
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
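
/* Enqueue to the child qdisc of the selected band, charging any drop
 * against this qdisc's statistics.
 */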
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
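
/* Round-robin over the bands, starting just after the band serviced
 * last time, so no single band can starve the others.
 */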
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}
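
/* Like multiq_dequeue(), but only peeks at the next skb: nothing is
 * removed and the round-robin position is left untouched.
 */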
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;
}

static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}
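
/* (Re)size the band array to match the device's real transmit queue
 * count: bands beyond the new count collapse back to the noop qdisc,
 * and any band still pointing at the noop qdisc gets a fresh default
 * pfifo child.
 */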
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];

			q->queues[i] = &noop_qdisc;
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
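
/* Set up the band array (initially all noop) sized to the device's
 * maximum queue count, then let multiq_tune() populate it.
 */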
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);
	if (err)
		kfree(q->queues);

	return err;
}

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
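
/* Class hooks: each band is exposed as class minor number (band + 1),
 * so classid :1 is band 0, :2 is band 1, and so on.
 */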
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}

static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.get		=	multiq_get,
	.put		=	multiq_put,
	.walk		=	multiq_walk,
	.tcf_block	=	multiq_tcf_block,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_put,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");