// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
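/* Illustrative usage (editorial addition, not part of the original file):
 * the police action is normally attached from user space via a classifier,
 * e.g.
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *		action police rate 1mbit burst 10k drop
 *
 * The device name and qdisc handle above are example values only.
 */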
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_wrapper.h>
/* Each policer is serialized by its individual spinlock */

static struct tc_action_ops act_police_ops;
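/* Editorial note, summarizing the code below: configuration paths (init/dump)
 * serialize on police->tcf_lock, the datapath only takes the per-policer
 * tcfp_lock around the token-bucket counters, and the parameter set
 * (struct tcf_police_params) is published to the datapath via RCU and read
 * with rcu_dereference_bh() in tcf_police_act().
 */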
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
	[TCA_POLICE_RATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PKTRATE64]	= { .type = NLA_U64, .min = 1 },
	[TCA_POLICE_PKTBURST64]	= { .type = NLA_U64, .min = 1 },
};
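/* Editorial note: TCA_POLICE_RATE64/PEAKRATE64 carry byte rates too large for
 * the 32-bit rate field of struct tc_ratespec, while TCA_POLICE_PKTRATE64 and
 * TCA_POLICE_PKTBURST64 select packets-per-second policing and must be
 * supplied together (enforced in tcf_police_init() below).
 */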
static int tcf_police_init(struct net *net, struct nlattr *nla,
			       struct nlattr *est, struct tc_action **a,
			       struct tcf_proto *tp, u32 flags,
			       struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
	struct tcf_police_params *new;
	bool exists = false;
	u32 index;
	u64 rate64, prate64;
	u64 pps, ppsburst;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
					  police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (!exists) {
		ret = tcf_idr_create(tn, index, NULL, a,
				     &act_police_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    false, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		err = -EINVAL;
		goto failure;
	}
	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

	if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) ||
	    (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) {
		NL_SET_ERR_MSG(extack,
			       "Both or neither packet-per-second burst and rate must be provided");
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_PKTRATE64] && R_tab) {
		NL_SET_ERR_MSG(extack,
			       "packet-per-second and byte-per-second rate limits not allowed in same action");
		err = -EINVAL;
		goto failure;
	}
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		rate64 = tb[TCA_POLICE_RATE64] ?
			 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		prate64 = tb[TCA_POLICE_PEAKRATE64] ?
			 nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	if (tb[TCA_POLICE_PKTRATE64]) {
		pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]);
		ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]);
		new->pps_present = true;
		new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst);
		psched_ppscfg_precompute(&new->ppsrate, pps);
	}
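	/* Commit step (editorial summary of the code below): reset the token
	 * bucket under tcfp_lock, then publish the new parameter set with
	 * rcu_replace_pointer() while holding tcf_lock; the previous
	 * parameters are freed after an RCU grace period.
	 */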
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	new = rcu_replace_pointer(police->params,
				  new,
				  lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (new)
		kfree_rcu(new, rcu);

	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit)
{
	u32 len;

	if (skb_is_gso(skb))
		return skb_gso_validate_mac_len(skb, limit);

	len = qdisc_pkt_len(skb);
	if (skb_at_tc_ingress(skb))
		len += skb->mac_len;

	return len <= limit;
}
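/* Editorial note: tcf_police_act() below implements a token-bucket policer.
 * Token counters are kept in nanoseconds of transmission time: on each packet
 * the elapsed idle time (capped at the configured burst) is added to the
 * bucket and the time needed to send this packet at the configured rate is
 * subtracted; the packet conforms while the result stays non-negative. A
 * second bucket (peak rate) and a packets-per-second bucket work the same way.
 */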
TC_INDIRECT_SCOPE int tcf_police_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	s64 now, toks, ppstoks = 0, ptoks = 0;
	struct tcf_police_params *p;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (tcf_police_mtu_check(skb, p->tcfp_mtu)) {
		if (!p->rate_present && !p->pps_present) {
			ret = p->tcfp_result;
			goto end;
		}

		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		if (p->rate_present) {
			toks += police->tcfp_toks;
			if (toks > p->tcfp_burst)
				toks = p->tcfp_burst;
			toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		} else if (p->pps_present) {
			ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst);
			ppstoks += police->tcfp_pkttoks;
			if (ppstoks > p->tcfp_pkt_burst)
				ppstoks = p->tcfp_pkt_burst;
			ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1);
		}
		if ((toks | ptoks | ppstoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			police->tcfp_pkttoks = ppstoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}
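/* Illustrative numbers (editorial addition, not from the original source):
 * with rate 1 Mbit/s and burst 10 kbyte, tcfp_burst is the time needed to
 * send the burst at the configured rate, i.e. 80,000 bits / 1,000,000 bit/s
 * = 80 ms (80,000,000 ns); a 1500-byte packet then consumes
 * psched_l2t_ns() = 12 ms worth of tokens, so roughly six such packets can
 * arrive back to back before the policer starts returning the exceed action.
 */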
static void tcf_police_cleanup(struct tc_action *a)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;

	p = rcu_dereference_protected(police->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}
static void tcf_police_stats_update(struct tc_action *a,
				    u64 bytes, u64 packets, u64 drops,
				    u64 lastuse, bool hw)
{
	struct tcf_police *police = to_police(a);
	struct tcf_t *tm = &police->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
			       int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	struct tc_police opt = {
		.index = police->tcf_index,
		.refcnt = refcount_read(&police->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));
	opt.mtu = p->tcfp_mtu;
	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
	if (p->rate_present) {
		psched_ratecfg_getrate(&opt.rate, &p->rate);
		if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
				      p->rate.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->peak_present) {
		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
		if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
				      p->peak.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->pps_present) {
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
				      p->ppsrate.rate_pkts_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
				      PSCHED_NS2TICKS(p->tcfp_pkt_burst),
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	if (p->tcfp_result &&
	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
		goto nla_put_failure;
	if (p->tcfp_ewma_rate &&
	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
		goto nla_put_failure;

	tcf_tm_dump(&t, &police->tcf_tm);
	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
				      struct netlink_ext_ack *extack)
{
	int act_id = -EOPNOTSUPP;

	if (!TC_ACT_EXT_OPCODE(tc_act)) {
		if (tc_act == TC_ACT_OK)
			act_id = FLOW_ACTION_ACCEPT;
		else if (tc_act == TC_ACT_SHOT)
			act_id = FLOW_ACTION_DROP;
		else if (tc_act == TC_ACT_PIPE)
			act_id = FLOW_ACTION_PIPE;
		else if (tc_act == TC_ACT_RECLASSIFY)
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\"");
		else
			NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
		act_id = FLOW_ACTION_GOTO;
		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
		act_id = FLOW_ACTION_JUMP;
		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
	} else if (tc_act == TC_ACT_UNSPEC) {
		act_id = FLOW_ACTION_CONTINUE;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
	}

	return act_id;
}
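/* Editorial note: tcf_police_offload_act_setup() below applies the
 * translation above twice when a driver binds the action for hardware
 * offload: once for the exceed action (police->tcf_action) and once for the
 * conform/notexceed action (p->tcfp_result), alongside the precomputed
 * rate and burst parameters.
 */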
static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;
		struct tcf_police *police = to_police(act);
		struct tcf_police_params *p;
		int act_id;

		p = rcu_dereference_protected(police->params,
					      lockdep_is_held(&police->tcf_lock));

		entry->id = FLOW_ACTION_POLICE;
		entry->police.burst = tcf_police_burst(act);
		entry->police.rate_bytes_ps =
			tcf_police_rate_bytes_ps(act);
		entry->police.peakrate_bytes_ps = tcf_police_peakrate_bytes_ps(act);
		entry->police.avrate = tcf_police_tcfp_ewma_rate(act);
		entry->police.overhead = tcf_police_rate_overhead(act);
		entry->police.burst_pkt = tcf_police_burst_pkt(act);
		entry->police.rate_pkt_ps =
			tcf_police_rate_pkt_ps(act);
		entry->police.mtu = tcf_police_tcfp_mtu(act);

		act_id = tcf_police_act_to_flow_act(police->tcf_action,
						    &entry->police.exceed.extval,
						    extack);
		if (act_id < 0)
			return act_id;

		entry->police.exceed.act_id = act_id;

		act_id = tcf_police_act_to_flow_act(p->tcfp_result,
						    &entry->police.notexceed.extval,
						    extack);
		if (act_id < 0)
			return act_id;

		entry->police.notexceed.act_id = act_id;

		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_POLICE;
	}

	return 0;
}
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.id		=	TCA_ID_POLICE,
	.owner		=	THIS_MODULE,
	.stats_update	=	tcf_police_stats_update,
	.act		=	tcf_police_act,
	.dump		=	tcf_police_dump,
	.init		=	tcf_police_init,
	.cleanup	=	tcf_police_cleanup,
	.offload_act_setup =	tcf_police_offload_act_setup,
	.size		=	sizeof(struct tcf_police),
};
static __net_init int police_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);

	return tc_action_net_init(net, tn, &act_police_ops);
}

static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_police_ops.net_id);
}

static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id   = &act_police_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}

static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);