// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
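
	(A worked example, with numbers chosen purely for illustration:
	take R = 125000 bytes/sec and B = 12500 bytes, so N starts at
	B/R = 0.1 sec.  A 12500-byte packet may be sent immediately, since
	12500/R = 0.1 <= N, after which N drops to 0; every later packet of
	size S must wait until N has grown back to S/R, which paces the flow
	at R on average while still allowing an initial burst of B bytes.)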

	In practice, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size; the other, with
	rate P (peak rate) and depth M (equal to the link MTU), limits
	bursts on a smaller time scale.

	It is easy to see that P > R and B > M.  If P is infinite, this
	double TBF is equivalent to a single one.
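
	(Illustrative figures only: R = 125 Kbytes/sec with B = 10 Kbytes
	for the long-term bucket, and P = 1250 Kbytes/sec with M = 1500
	bytes for the peak bucket, satisfy P > R and B > M.  A 10 Kbyte
	backlog may still leave as one burst, but consecutive 1500-byte
	packets are spaced at least 1500/P = 1.2 msec apart.)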

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
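
	(Reading L as the queue backlog limit in bytes - an assumption, as
	L is not defined above - an illustrative case is L = 100 Kbytes,
	B = 10 Kbytes, R = 125 Kbytes/sec, M = 1500 bytes, P = 1250
	Kbytes/sec: lat = max(90/125, 98.5/1250) sec = max(0.72, 0.079) sec
	= 0.72 sec, so the long-term rate dominates the latency bound.)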

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)
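
	(Spelling out the arithmetic above for illustration: 10 Mbit/sec is
	1.25 Mbytes/sec, so B must be at least R/HZ = 12.5 Kbytes, roughly
	the ~10 Kbytes quoted; for the peak bucket, P_crit = M*HZ =
	1500 bytes * 100 = 150 Kbytes/sec.)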

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
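
/* Worked example for the conversion above (values are illustrative only):
 * with rate_bytes_ps = 125000 (1 Mbit/sec) and time_in_ns = 8000000 (8 msec),
 * len = 8000000 * 125000 / NSEC_PER_SEC = 1000 bytes.  On TC_LINKLAYER_ATM
 * the integer division by 53 runs first (1000 / 53 = 18 cells), giving
 * 18 * 48 = 864 payload bytes, and r->overhead, if any, is then subtracted.
 */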

static void tbf_offload_change(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.rate = q->rate;
	qopt.replace_params.max_size = q->max_size;
	qopt.replace_params.qstats = &sch->qstats;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static int tbf_offload_dump(struct Qdisc *sch)
{
	struct tc_tbf_qopt_offload qopt;

	qopt.command = TC_TBF_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, struct netlink_ext_ack *extack)
{
	struct tc_tbf_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_TBF_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_TBF, &graft_offload, extack);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) &&
		    skb_gso_validate_mac_len(skb, q->max_size))
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now, toks, ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		/* Both buckets (rate and, if present, peak) have tokens left */
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct Qdisc *old = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
					  NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old = q->qdisc;
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	qdisc_put(old);
	err = 0;

	tbf_offload_change(sch);
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	tbf_offload_destroy(sch);
	qdisc_put(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;
	int err;

	err = tbf_offload_dump(sch);
	if (err)
		return err;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	tbf_offload_graft(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	tc_qdisc_stats_dump(sch, 1, walker);
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");