1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * net/sched/cls_rsvp.h Template file for RSVPv[46] classifiers.
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 Compared to the general packet classification problem,
10 RSVP needs only a few relatively simple rules:
12 * (dst, protocol) are always specified,
13 so that we are able to hash them.
14 * src may be exact, or may be wildcard, so that
15 we can keep a hash table plus one wildcard entry.
16 * source port (or flow label) is important only if src is given.
20 We use a two level hash table: The top level is keyed by
21 destination address and protocol ID, every bucket contains a list
22 of "rsvp sessions", identified by destination address, protocol and
23 DPI(="Destination Port ID"): triple (key, mask, offset).
25 Every bucket has a smaller hash table keyed by source address
26 (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
27 Every bucket is again a list of "RSVP flows", selected by
28 source address and SPI(="Source Port ID" here rather than
29 "security parameter index"): triple (key, mask, offset).
32 NOTE 1. All the packets with IPv6 extension headers (but AH and ESP)
33 and all fragmented packets go to the best-effort traffic class.
36 NOTE 2. Two "port id"'s seem to be redundant, rfc2207 requires
37 only one "Generalized Port Identifier". So that for classic
38 ah, esp (and udp,tcp) both *pi should coincide or one of them
41 At first sight, this redundancy is just a waste of CPU
42 resources. But DPI and SPI add the possibility to assign different
43 priorities to GPIs. Look also at note 4 about tunnels below.
46 NOTE 3. One complication is the case of tunneled packets.
47 We implement it as follows: if the first lookup
48 matches a special session with "tunnelhdr" value not zero,
49 flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
50 In this case, we pull tunnelhdr bytes and restart lookup
51 with tunnel ID added to the list of keys. Simple and stupid 8)8)
52 It's enough for PIMREG and IPIP.
55 NOTE 4. Two GPIs make it possible to parse even GRE packets.
56 F.e. DPI can select ETH_P_IP (and necessary flags to make
57 tunnelhdr correct) in GRE protocol field and SPI matches
58 GRE key. Is it not nice? 8)8)
61 Well, as result, despite its simplicity, we get a pretty
62 powerful classification engine. */
69 struct rsvp_session __rcu *ht[256];
74 struct rsvp_session __rcu *next;
75 __be32 dst[RSVP_DST_LEN];
76 struct tc_rsvp_gpi dpi;
79 /* 16 (src,sport) hash slots, and one wildcard source slot */
80 struct rsvp_filter __rcu *ht[16 + 1];
86 struct rsvp_filter __rcu *next;
87 __be32 src[RSVP_DST_LEN];
88 struct tc_rsvp_gpi spi;
91 struct tcf_result res;
95 struct rsvp_session *sess;
96 struct rcu_work rwork;
99 static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
101 unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
105 return (h ^ protocol ^ tunnelid) & 0xFF;
108 static inline unsigned int hash_src(__be32 *src)
110 unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
118 #define RSVP_APPLY_RESULT() \
120 int r = tcf_exts_exec(skb, &f->exts, res); \
127 static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
128 struct tcf_result *res)
130 struct rsvp_head *head = rcu_dereference_bh(tp->root);
131 struct rsvp_session *s;
132 struct rsvp_filter *f;
138 #if RSVP_DST_LEN == 4
139 struct ipv6hdr *nhptr;
141 if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
143 nhptr = ipv6_hdr(skb);
147 if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
153 #if RSVP_DST_LEN == 4
154 src = &nhptr->saddr.s6_addr32[0];
155 dst = &nhptr->daddr.s6_addr32[0];
156 protocol = nhptr->nexthdr;
157 xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
161 protocol = nhptr->protocol;
162 xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
163 if (ip_is_fragment(nhptr))
167 h1 = hash_dst(dst, protocol, tunnelid);
170 for (s = rcu_dereference_bh(head->ht[h1]); s;
171 s = rcu_dereference_bh(s->next)) {
172 if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
173 protocol == s->protocol &&
175 (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
176 #if RSVP_DST_LEN == 4
177 dst[0] == s->dst[0] &&
178 dst[1] == s->dst[1] &&
179 dst[2] == s->dst[2] &&
181 tunnelid == s->tunnelid) {
183 for (f = rcu_dereference_bh(s->ht[h2]); f;
184 f = rcu_dereference_bh(f->next)) {
185 if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
186 !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
187 #if RSVP_DST_LEN == 4
189 src[0] == f->src[0] &&
190 src[1] == f->src[1] &&
198 if (f->tunnelhdr == 0)
201 tunnelid = f->res.classid;
202 nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
207 /* And wildcard bucket... */
208 for (f = rcu_dereference_bh(s->ht[16]); f;
209 f = rcu_dereference_bh(f->next)) {
220 static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
222 struct rsvp_head *head = rtnl_dereference(tp->root);
223 struct rsvp_session *s;
224 struct rsvp_filter __rcu **ins;
225 struct rsvp_filter *pins;
226 unsigned int h1 = h & 0xFF;
227 unsigned int h2 = (h >> 8) & 0xFF;
229 for (s = rtnl_dereference(head->ht[h1]); s;
230 s = rtnl_dereference(s->next)) {
231 for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
232 ins = &pins->next, pins = rtnl_dereference(*ins)) {
233 if (pins->handle == h) {
234 RCU_INIT_POINTER(n->next, pins->next);
235 rcu_assign_pointer(*ins, n);
241 /* Something went wrong if we are trying to replace a non-existent
242 * node. Mind as well halt instead of silently failing.
247 static void *rsvp_get(struct tcf_proto *tp, u32 handle)
249 struct rsvp_head *head = rtnl_dereference(tp->root);
250 struct rsvp_session *s;
251 struct rsvp_filter *f;
252 unsigned int h1 = handle & 0xFF;
253 unsigned int h2 = (handle >> 8) & 0xFF;
258 for (s = rtnl_dereference(head->ht[h1]); s;
259 s = rtnl_dereference(s->next)) {
260 for (f = rtnl_dereference(s->ht[h2]); f;
261 f = rtnl_dereference(f->next)) {
262 if (f->handle == handle)
269 static int rsvp_init(struct tcf_proto *tp)
271 struct rsvp_head *data;
273 data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
275 rcu_assign_pointer(tp->root, data);
281 static void __rsvp_delete_filter(struct rsvp_filter *f)
283 tcf_exts_destroy(&f->exts);
284 tcf_exts_put_net(&f->exts);
288 static void rsvp_delete_filter_work(struct work_struct *work)
290 struct rsvp_filter *f = container_of(to_rcu_work(work),
294 __rsvp_delete_filter(f);
298 static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
300 tcf_unbind_filter(tp, &f->res);
301 /* all classifiers are required to call tcf_exts_destroy() after rcu
302 * grace period, since converted-to-rcu actions are relying on that
303 * in cleanup() callback
305 if (tcf_exts_get_net(&f->exts))
306 tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
308 __rsvp_delete_filter(f);
311 static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
312 struct netlink_ext_ack *extack)
314 struct rsvp_head *data = rtnl_dereference(tp->root);
320 for (h1 = 0; h1 < 256; h1++) {
321 struct rsvp_session *s;
323 while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
324 RCU_INIT_POINTER(data->ht[h1], s->next);
326 for (h2 = 0; h2 <= 16; h2++) {
327 struct rsvp_filter *f;
329 while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
330 rcu_assign_pointer(s->ht[h2], f->next);
331 rsvp_delete_filter(tp, f);
337 kfree_rcu(data, rcu);
340 static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
341 bool rtnl_held, struct netlink_ext_ack *extack)
343 struct rsvp_head *head = rtnl_dereference(tp->root);
344 struct rsvp_filter *nfp, *f = arg;
345 struct rsvp_filter __rcu **fp;
346 unsigned int h = f->handle;
347 struct rsvp_session __rcu **sp;
348 struct rsvp_session *nsp, *s = f->sess;
351 fp = &s->ht[(h >> 8) & 0xFF];
352 for (nfp = rtnl_dereference(*fp); nfp;
353 fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
355 RCU_INIT_POINTER(*fp, f->next);
356 rsvp_delete_filter(tp, f);
360 for (i = 0; i <= 16; i++)
364 /* OK, session has no flows */
365 sp = &head->ht[h & 0xFF];
366 for (nsp = rtnl_dereference(*sp); nsp;
367 sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
369 RCU_INIT_POINTER(*sp, s->next);
381 for (h1 = 0; h1 < 256; h1++) {
382 if (rcu_access_pointer(head->ht[h1])) {
391 static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
393 struct rsvp_head *data = rtnl_dereference(tp->root);
399 if ((data->hgenerator += 0x10000) == 0)
400 data->hgenerator = 0x10000;
401 h = data->hgenerator|salt;
402 if (!rsvp_get(tp, h))
408 static int tunnel_bts(struct rsvp_head *data)
410 int n = data->tgenerator >> 5;
411 u32 b = 1 << (data->tgenerator & 0x1F);
413 if (data->tmap[n] & b)
419 static void tunnel_recycle(struct rsvp_head *data)
421 struct rsvp_session __rcu **sht = data->ht;
425 memset(tmap, 0, sizeof(tmap));
427 for (h1 = 0; h1 < 256; h1++) {
428 struct rsvp_session *s;
429 for (s = rtnl_dereference(sht[h1]); s;
430 s = rtnl_dereference(s->next)) {
431 for (h2 = 0; h2 <= 16; h2++) {
432 struct rsvp_filter *f;
434 for (f = rtnl_dereference(s->ht[h2]); f;
435 f = rtnl_dereference(f->next)) {
436 if (f->tunnelhdr == 0)
438 data->tgenerator = f->res.classid;
445 memcpy(data->tmap, tmap, sizeof(tmap));
448 static u32 gen_tunnel(struct rsvp_head *data)
452 for (k = 0; k < 2; k++) {
453 for (i = 255; i > 0; i--) {
454 if (++data->tgenerator == 0)
455 data->tgenerator = 1;
456 if (tunnel_bts(data))
457 return data->tgenerator;
459 tunnel_recycle(data);
464 static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
465 [TCA_RSVP_CLASSID] = { .type = NLA_U32 },
466 [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) },
467 [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) },
468 [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
471 static int rsvp_change(struct net *net, struct sk_buff *in_skb,
472 struct tcf_proto *tp, unsigned long base,
473 u32 handle, struct nlattr **tca,
474 void **arg, u32 flags,
475 struct netlink_ext_ack *extack)
477 struct rsvp_head *data = rtnl_dereference(tp->root);
478 struct rsvp_filter *f, *nfp;
479 struct rsvp_filter __rcu **fp;
480 struct rsvp_session *nsp, *s;
481 struct rsvp_session __rcu **sp;
482 struct tc_rsvp_pinfo *pinfo = NULL;
483 struct nlattr *opt = tca[TCA_OPTIONS];
484 struct nlattr *tb[TCA_RSVP_MAX + 1];
491 return handle ? -EINVAL : 0;
493 err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
498 err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
501 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, flags,
508 /* Node exists: adjust only classid */
509 struct rsvp_filter *n;
511 if (f->handle != handle && handle)
514 n = kmemdup(f, sizeof(*f), GFP_KERNEL);
520 err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
527 if (tb[TCA_RSVP_CLASSID]) {
528 n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
529 tcf_bind_filter(tp, &n->res, base);
532 tcf_exts_change(&n->exts, &e);
533 rsvp_replace(tp, n, handle);
537 /* Now more serious part... */
541 if (tb[TCA_RSVP_DST] == NULL)
545 f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
549 err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
553 if (tb[TCA_RSVP_SRC]) {
554 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
555 h2 = hash_src(f->src);
557 if (tb[TCA_RSVP_PINFO]) {
558 pinfo = nla_data(tb[TCA_RSVP_PINFO]);
560 f->tunnelhdr = pinfo->tunnelhdr;
562 if (tb[TCA_RSVP_CLASSID])
563 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
565 dst = nla_data(tb[TCA_RSVP_DST]);
566 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
569 if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
574 if (f->res.classid > 255)
578 if (f->res.classid == 0 &&
579 (f->res.classid = gen_tunnel(data)) == 0)
583 for (sp = &data->ht[h1];
584 (s = rtnl_dereference(*sp)) != NULL;
586 if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
587 pinfo && pinfo->protocol == s->protocol &&
588 memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
589 #if RSVP_DST_LEN == 4
590 dst[0] == s->dst[0] &&
591 dst[1] == s->dst[1] &&
592 dst[2] == s->dst[2] &&
594 pinfo->tunnelid == s->tunnelid) {
597 /* OK, we found appropriate session */
602 if (f->tunnelhdr == 0)
603 tcf_bind_filter(tp, &f->res, base);
605 tcf_exts_change(&f->exts, &e);
608 for (nfp = rtnl_dereference(*fp); nfp;
609 fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
610 __u32 mask = nfp->spi.mask & f->spi.mask;
612 if (mask != f->spi.mask)
615 RCU_INIT_POINTER(f->next, nfp);
616 rcu_assign_pointer(*fp, f);
623 /* No session found. Create new one. */
626 s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
629 memcpy(s->dst, dst, sizeof(s->dst));
633 s->protocol = pinfo->protocol;
634 s->tunnelid = pinfo->tunnelid;
637 for (nsp = rtnl_dereference(*sp); nsp;
638 sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
639 if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
642 RCU_INIT_POINTER(s->next, nsp);
643 rcu_assign_pointer(*sp, s);
648 tcf_exts_destroy(&f->exts);
651 tcf_exts_destroy(&e);
655 static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
658 struct rsvp_head *head = rtnl_dereference(tp->root);
664 for (h = 0; h < 256; h++) {
665 struct rsvp_session *s;
667 for (s = rtnl_dereference(head->ht[h]); s;
668 s = rtnl_dereference(s->next)) {
669 for (h1 = 0; h1 <= 16; h1++) {
670 struct rsvp_filter *f;
672 for (f = rtnl_dereference(s->ht[h1]); f;
673 f = rtnl_dereference(f->next)) {
674 if (arg->count < arg->skip) {
678 if (arg->fn(tp, f, arg) < 0) {
689 static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
690 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
692 struct rsvp_filter *f = fh;
693 struct rsvp_session *s;
695 struct tc_rsvp_pinfo pinfo;
701 t->tcm_handle = f->handle;
703 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
705 goto nla_put_failure;
707 if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
708 goto nla_put_failure;
711 pinfo.protocol = s->protocol;
712 pinfo.tunnelid = s->tunnelid;
713 pinfo.tunnelhdr = f->tunnelhdr;
715 if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
716 goto nla_put_failure;
717 if (f->res.classid &&
718 nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
719 goto nla_put_failure;
720 if (((f->handle >> 8) & 0xFF) != 16 &&
721 nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
722 goto nla_put_failure;
724 if (tcf_exts_dump(skb, &f->exts) < 0)
725 goto nla_put_failure;
727 nla_nest_end(skb, nest);
729 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
730 goto nla_put_failure;
734 nla_nest_cancel(skb, nest);
738 static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
741 struct rsvp_filter *f = fh;
743 if (f && f->res.classid == classid) {
745 __tcf_bind_filter(q, &f->res, base);
747 __tcf_unbind_filter(q, &f->res);
751 static struct tcf_proto_ops RSVP_OPS __read_mostly = {
753 .classify = rsvp_classify,
755 .destroy = rsvp_destroy,
757 .change = rsvp_change,
758 .delete = rsvp_delete,
761 .bind_class = rsvp_bind_class,
762 .owner = THIS_MODULE,
765 static int __init init_rsvp(void)
767 return register_tcf_proto_ops(&RSVP_OPS);
770 static void __exit exit_rsvp(void)
772 unregister_tcf_proto_ops(&RSVP_OPS);
775 module_init(init_rsvp)
776 module_exit(exit_rsvp)