// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors:     Jamal Hadi Salim 1999
 */
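
/* Both qdiscs in this file are classifier-only attachment points: they never
 * enqueue packets themselves, they only anchor tc filter blocks.  "ingress"
 * hooks the receive path; "clsact" hooks both the receive and the transmit
 * path.  Illustrative userspace usage (device name is a placeholder):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall action drop
 */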

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
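
/* Per-qdisc private data for the ingress qdisc: the tcf filter block that
 * filters attach to, its extended binding info, and the mini-Qdisc pair that
 * publishes the active filter chain to the RX fast path.
 */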
struct ingress_sched_data {
        struct tcf_block *block;
        struct tcf_block_ext_info block_info;
        struct mini_Qdisc_pair miniqp;
};

static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
        return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
                                         unsigned long parent, u32 classid)
{
        return ingress_find(sch, classid);
}

static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
                                           struct netlink_ext_ack *extack)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        return q->block;
}
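
/* Invoked by the tcf block layer whenever the head of the filter chain
 * changes; swap the new head into the mini-Qdisc pair so the data path
 * picks it up.
 */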
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
        struct mini_Qdisc_pair *miniqp = priv;

        mini_qdisc_pair_swap(miniqp, tp_head);
}

static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        return q->block_info.block_index;
}
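
/* Attach-time setup: refuse any parent other than the ingress hook, enable
 * the ingress static key, initialize the mini-Qdisc pair and acquire the
 * filter block (a shared one if a block index was configured).
 */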
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        struct ingress_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int err;

        if (sch->parent != TC_H_INGRESS)
                return -EOPNOTSUPP;

        net_inc_ingress_queue();

        mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

        q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        q->block_info.chain_head_change = clsact_chain_head_change;
        q->block_info.chain_head_change_priv = &q->miniqp;

        err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
        if (err)
                return err;

        mini_qdisc_pair_block_init(&q->miniqp, q->block);

        return 0;
}

static void ingress_destroy(struct Qdisc *sch)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        if (sch->parent != TC_H_INGRESS)
                return;

        tcf_block_put_ext(q->block, sch, &q->block_info);
        net_dec_ingress_queue();
}
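
/* The qdisc carries no configuration of its own, so the dump callback just
 * emits an empty TCA_OPTIONS nest.
 */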
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
        .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
        .leaf = ingress_leaf,
        .find = ingress_find,
        .walk = ingress_walk,
        .tcf_block = ingress_tcf_block,
        .bind_tcf = ingress_bind_filter,
        .unbind_tcf = ingress_unbind_filter,
};

static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
        .cl_ops = &ingress_class_ops,
        .id = "ingress",
        .priv_size = sizeof(struct ingress_sched_data),
        .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
        .init = ingress_init,
        .destroy = ingress_destroy,
        .dump = ingress_dump,
        .ingress_block_set = ingress_ingress_block_set,
        .ingress_block_get = ingress_ingress_block_get,
        .owner = THIS_MODULE,
};
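
/* clsact: like ingress, but with a second filter block and mini-Qdisc pair
 * for the egress (transmit) hook.
 */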
struct clsact_sched_data {
        struct tcf_block *ingress_block;
        struct tcf_block *egress_block;
        struct tcf_block_ext_info ingress_block_info;
        struct tcf_block_ext_info egress_block_info;
        struct mini_Qdisc_pair miniqp_ingress;
        struct mini_Qdisc_pair miniqp_egress;
};
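
/* Only the two fixed pseudo-classes (the ingress and egress minors) exist;
 * any other class id is reported as not found.
 */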
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
        switch (TC_H_MIN(classid)) {
        case TC_H_MIN(TC_H_MIN_INGRESS):
        case TC_H_MIN(TC_H_MIN_EGRESS):
                return TC_H_MIN(classid);
        default:
                return 0;
        }
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
{
        return clsact_find(sch, classid);
}

static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
                                          struct netlink_ext_ack *extack)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        switch (cl) {
        case TC_H_MIN(TC_H_MIN_INGRESS):
                return q->ingress_block;
        case TC_H_MIN(TC_H_MIN_EGRESS):
                return q->egress_block;
        default:
                return NULL;
        }
}

static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        q->ingress_block_info.block_index = block_index;
}

static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        q->egress_block_info.block_index = block_index;
}

static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        return q->ingress_block_info.block_index;
}

static u32 clsact_egress_block_get(struct Qdisc *sch)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        return q->egress_block_info.block_index;
}
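
/* Attach-time setup for clsact: mirrors ingress_init(), but enables both the
 * ingress and egress hooks and acquires two filter blocks, ingress first and
 * then egress.
 */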
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct clsact_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int err;

        if (sch->parent != TC_H_CLSACT)
                return -EOPNOTSUPP;

        net_inc_ingress_queue();
        net_inc_egress_queue();

        mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);

        q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        q->ingress_block_info.chain_head_change = clsact_chain_head_change;
        q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

        err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
                                extack);
        if (err)
                return err;

        mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

        mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);

        q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
        q->egress_block_info.chain_head_change = clsact_chain_head_change;
        q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

        return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}

static void clsact_destroy(struct Qdisc *sch)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        if (sch->parent != TC_H_CLSACT)
                return;

        tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
        tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);

        net_dec_ingress_queue();
        net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
        .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
        .leaf = ingress_leaf,
        .find = clsact_find,
        .walk = ingress_walk,
        .tcf_block = clsact_tcf_block,
        .bind_tcf = clsact_bind_filter,
        .unbind_tcf = ingress_unbind_filter,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
        .cl_ops = &clsact_class_ops,
        .id = "clsact",
        .priv_size = sizeof(struct clsact_sched_data),
        .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
        .init = clsact_init,
        .destroy = clsact_destroy,
        .dump = ingress_dump,
        .ingress_block_set = clsact_ingress_block_set,
        .egress_block_set = clsact_egress_block_set,
        .ingress_block_get = clsact_ingress_block_get,
        .egress_block_get = clsact_egress_block_get,
        .owner = THIS_MODULE,
};
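
/* Module entry point: register both qdiscs; if the second registration
 * fails, roll back the first.
 */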
static int __init ingress_module_init(void)
{
        int ret;

        ret = register_qdisc(&ingress_qdisc_ops);
        if (!ret) {
                ret = register_qdisc(&clsact_qdisc_ops);
                if (ret)
                        unregister_qdisc(&ingress_qdisc_ops);
        }

        return ret;
}

static void __exit ingress_module_exit(void)
{
        unregister_qdisc(&ingress_qdisc_ops);
        unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");