/* net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 *	Refer to:
 *	draft-ietf-forces-interfelfb-03
 *	and
 *	netdev01 paper:
 *	"Distributing Linux Traffic Control Classifier-Action
 *	Subsystem"
 *
 *	Authors:	Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * copyright Jamal Hadi Salim (2015)
 */
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <linux/skbuff.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <net/net_namespace.h>
30 #include <net/netlink.h>
31 #include <net/pkt_sched.h>
32 #include <uapi/linux/tc_act/tc_ife.h>
33 #include <net/tc_act/tc_ife.h>
34 #include <linux/etherdevice.h>
/* per-netns id for this action's tc_action_net instance */
static unsigned int ife_net_id;
/* upper bound for iterating IFE_META_* attribute ids (see populate_metalist) */
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;
41 static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
42 [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
43 [TCA_IFE_DMAC] = { .len = ETH_ALEN},
44 [TCA_IFE_SMAC] = { .len = ETH_ALEN},
45 [TCA_IFE_TYPE] = { .type = NLA_U16},
48 int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
53 edata = *(u16 *)mi->metaval;
57 if (!edata) /* will not encode */
61 return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
63 EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
65 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
68 return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
70 return nla_put(skb, mi->metaid, 0, NULL);
72 EXPORT_SYMBOL_GPL(ife_get_meta_u32);
74 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
76 if (metaval || mi->metaval)
77 return 8; /* T+L+V == 2+2+4 */
81 EXPORT_SYMBOL_GPL(ife_check_meta_u32);
83 int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
85 if (metaval || mi->metaval)
86 return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
90 EXPORT_SYMBOL_GPL(ife_check_meta_u16);
92 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
97 edata = *(u32 *)mi->metaval;
101 if (!edata) /* will not encode */
104 edata = htonl(edata);
105 return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
107 EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
109 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
112 return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
114 return nla_put(skb, mi->metaid, 0, NULL);
116 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
118 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
120 mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
126 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
128 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
130 mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
136 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
138 void ife_release_meta_gen(struct tcf_meta_info *mi)
142 EXPORT_SYMBOL_GPL(ife_release_meta_gen);
144 int ife_validate_meta_u32(void *val, int len)
146 if (len == sizeof(u32))
151 EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
153 int ife_validate_meta_u16(void *val, int len)
155 /* length will not include padding */
156 if (len == sizeof(u16))
161 EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
/* registry of loaded meta ops modules, guarded by ife_mod_lock */
static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);
166 static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
168 struct tcf_meta_ops *o;
170 read_lock(&ife_mod_lock);
171 list_for_each_entry(o, &ifeoplist, list) {
172 if (o->metaid == metaid) {
173 if (!try_module_get(o->owner))
175 read_unlock(&ife_mod_lock);
179 read_unlock(&ife_mod_lock);
184 int register_ife_op(struct tcf_meta_ops *mops)
186 struct tcf_meta_ops *m;
188 if (!mops->metaid || !mops->metatype || !mops->name ||
189 !mops->check_presence || !mops->encode || !mops->decode ||
190 !mops->get || !mops->alloc)
193 write_lock(&ife_mod_lock);
195 list_for_each_entry(m, &ifeoplist, list) {
196 if (m->metaid == mops->metaid ||
197 (strcmp(mops->name, m->name) == 0)) {
198 write_unlock(&ife_mod_lock);
204 mops->release = ife_release_meta_gen;
206 list_add_tail(&mops->list, &ifeoplist);
207 write_unlock(&ife_mod_lock);
210 EXPORT_SYMBOL_GPL(unregister_ife_op);
212 int unregister_ife_op(struct tcf_meta_ops *mops)
214 struct tcf_meta_ops *m;
217 write_lock(&ife_mod_lock);
218 list_for_each_entry(m, &ifeoplist, list) {
219 if (m->metaid == mops->metaid) {
220 list_del(&mops->list);
225 write_unlock(&ife_mod_lock);
229 EXPORT_SYMBOL_GPL(register_ife_op);
231 static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
234 /* XXX: unfortunately cant use nla_policy at this point
235 * because a length of 0 is valid in the case of
236 * "allow". "use" semantics do enforce for proper
237 * length and i couldve use nla_policy but it makes it hard
238 * to use it just for that..
241 return ops->validate(val, len);
243 if (ops->metatype == NLA_U32)
244 ret = ife_validate_meta_u32(val, len);
245 else if (ops->metatype == NLA_U16)
246 ret = ife_validate_meta_u16(val, len);
251 /* called when adding new meta information
252 * under ife->tcf_lock for existing action
254 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
255 void *val, int len, bool exists)
257 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
262 #ifdef CONFIG_MODULES
264 spin_unlock_bh(&ife->tcf_lock);
266 request_module("ifemeta%u", metaid);
269 spin_lock_bh(&ife->tcf_lock);
270 ops = find_ife_oplist(metaid);
277 ret = ife_validate_metatype(ops, val, len);
279 module_put(ops->owner);
285 /* called when adding new meta information
286 * under ife->tcf_lock for existing action
288 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
289 int len, bool atomic)
291 struct tcf_meta_info *mi = NULL;
292 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
298 mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
300 /*put back what find_ife_oplist took */
301 module_put(ops->owner);
308 ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
311 module_put(ops->owner);
316 list_add_tail(&mi->metalist, &ife->metalist);
321 static int use_all_metadata(struct tcf_ife_info *ife)
323 struct tcf_meta_ops *o;
327 read_lock(&ife_mod_lock);
328 list_for_each_entry(o, &ifeoplist, list) {
329 rc = add_metainfo(ife, o->metaid, NULL, 0, true);
333 read_unlock(&ife_mod_lock);
341 static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
343 struct tcf_meta_info *e;
345 unsigned char *b = skb_tail_pointer(skb);
346 int total_encoded = 0;
348 /*can only happen on decode */
349 if (list_empty(&ife->metalist))
352 nest = nla_nest_start(skb, TCA_IFE_METALST);
356 list_for_each_entry(e, &ife->metalist, metalist) {
357 if (!e->ops->get(skb, e))
364 nla_nest_end(skb, nest);
373 /* under ife->tcf_lock */
374 static void _tcf_ife_cleanup(struct tc_action *a, int bind)
376 struct tcf_ife_info *ife = to_ife(a);
377 struct tcf_meta_info *e, *n;
379 list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
380 module_put(e->ops->owner);
381 list_del(&e->metalist);
392 static void tcf_ife_cleanup(struct tc_action *a, int bind)
394 struct tcf_ife_info *ife = to_ife(a);
396 spin_lock_bh(&ife->tcf_lock);
397 _tcf_ife_cleanup(a, bind);
398 spin_unlock_bh(&ife->tcf_lock);
401 /* under ife->tcf_lock for existing action */
402 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
410 for (i = 1; i < max_metacnt; i++) {
412 val = nla_data(tb[i]);
413 len = nla_len(tb[i]);
415 rc = load_metaops_and_vet(ife, i, val, len, exists);
419 rc = add_metainfo(ife, i, val, len, exists);
/* Create or update an IFE action from netlink attributes.
 * NOTE(review): this span is heavily truncated (missing braces, error
 * labels and several statements) — annotated only; do not trust it to
 * compile as-is. Restore from the upstream file before building.
 */
static int tcf_ife_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
	struct tc_action_net *tn = net_generic(net, ife_net_id);
	struct nlattr *tb[TCA_IFE_MAX + 1];
	struct nlattr *tb2[IFE_META_MAX + 1];
	struct tcf_ife_info *ife;
	/* default outer ethertype for encode unless TCA_IFE_TYPE overrides */
	u16 ife_type = ETH_P_IFE;
	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
	/* TCA_IFE_PARMS is mandatory */
	if (!tb[TCA_IFE_PARMS])
	parm = nla_data(tb[TCA_IFE_PARMS]);
	/* does an action with this index already exist? */
	exists = tcf_idr_check(tn, parm->index, a, bind);
		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
			tcf_idr_release(*a, bind);
	ife->flags = parm->flags;
	/* encode side: pick up optional outer ethertype and MAC overrides */
	if (parm->flags & IFE_ENCODE) {
		if (tb[TCA_IFE_TYPE])
			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
		if (tb[TCA_IFE_DMAC])
			daddr = nla_data(tb[TCA_IFE_DMAC]);
		if (tb[TCA_IFE_SMAC])
			saddr = nla_data(tb[TCA_IFE_SMAC]);
		spin_lock_bh(&ife->tcf_lock);
	ife->tcf_action = parm->action;
	/* zeroed MACs mean "leave the header address untouched" at encode */
	if (parm->flags & IFE_ENCODE) {
			ether_addr_copy(ife->eth_dst, daddr);
			eth_zero_addr(ife->eth_dst);
			ether_addr_copy(ife->eth_src, saddr);
			eth_zero_addr(ife->eth_src);
		ife->eth_type = ife_type;
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&ife->metalist);
	/* explicit metadata list supplied by userspace */
	if (tb[TCA_IFE_METALST]) {
		err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
			tcf_idr_release(*a, bind);
			if (ret == ACT_P_CREATED)
				_tcf_ife_cleanup(*a, bind);
				spin_unlock_bh(&ife->tcf_lock);
		err = populate_metalist(ife, tb2, exists);
			goto metadata_parse_err;
		/* if no passed metadata allow list or passed allow-all
		 * then here we process by adding as many supported metadatum
		 * as we can. You better have at least one else we are
		 */
		err = use_all_metadata(ife);
			if (ret == ACT_P_CREATED)
				_tcf_ife_cleanup(*a, bind);
			spin_unlock_bh(&ife->tcf_lock);
	spin_unlock_bh(&ife->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
/* Dump the action's configuration (parms, timers, MACs, ethertype and
 * metalist) to a netlink message.
 * NOTE(review): this span is truncated (missing braces, locals, the
 * nla_put_failure label and final returns) — annotated only; restore
 * from the upstream file before building.
 */
static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ife_info *ife = to_ife(a);
	struct tc_ife opt = {
		.index = ife->tcf_index,
		.refcnt = ife->tcf_refcnt - ref,
		.bindcnt = ife->tcf_bindcnt - bind,
		.action = ife->tcf_action,
	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	tcf_tm_dump(&t, &ife->tcf_tm);
	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
		goto nla_put_failure;
	/* MACs are dumped only when configured (non-zero) */
	if (!is_zero_ether_addr(ife->eth_dst)) {
		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
			goto nla_put_failure;
	if (!is_zero_ether_addr(ife->eth_src)) {
		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
			goto nla_put_failure;
	if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
		goto nla_put_failure;
	if (dump_metalist(skb, ife)) {
		/*ignore failure to dump metalist */
		pr_info("Failed to dump metalist\n");
593 static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
594 u16 metaid, u16 mlen, void *mdata)
596 struct tcf_meta_info *e;
598 /* XXX: use hash to speed up */
599 list_for_each_entry(e, &ife->metalist, metalist) {
600 if (metaid == e->metaid) {
602 /* We check for decode presence already */
603 return e->ops->decode(skb, mdata, mlen);
/* Decode path: strip the IFE header, walk its TLVs handing each
 * metadatum to find_decode_metaid, then restore skb->protocol.
 * NOTE(review): this span is truncated (missing braces, locals such as
 * tlv_data/metalen and several returns/drop paths) — annotated only;
 * restore from the upstream file before building.
 */
static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	tcf_lastuse_update(&ife->tcf_tm);
	spin_unlock(&ife->tcf_lock);
	/* on ingress the MAC header has already been pulled; re-expose it */
	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);
	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		spin_lock(&ife->tcf_lock);
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
	ifehdr_end = tlv_data + metalen;
	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
		curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);
		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
			/* abuse overlimits to count when we receive metadata
			 * but dont have an ops for it
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
			ife->tcf_qstats.overlimits++;
	/* walker must land exactly on the header end, else the TLVs lied */
	if (WARN_ON(tlv_data != ifehdr_end)) {
		spin_lock(&ife->tcf_lock);
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_reset_network_header(skb);
667 /*XXX: check if we can do this at install time instead of current
670 static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
672 struct tcf_meta_info *e, *n;
673 int tot_run_sz = 0, run_sz = 0;
675 list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
676 if (e->ops->check_presence) {
677 run_sz = e->ops->check_presence(skb, e);
678 tot_run_sz += run_sz;
/* Encode path: prepend an IFE header plus TLV-encoded metadata, rewrite
 * the outer ethernet header (MACs/ethertype) and account stats.
 * NOTE(review): this span is heavily truncated (missing braces, locals
 * such as err/ife_meta, drop/return paths, and the comment below lost
 * its delimiters) — annotated only; restore from the upstream file
 * before building.
 */
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	struct ethhdr *oethh;	/* outer ether header */
	struct tcf_meta_info *e;
	/* (broken comment fragment — wire layout description:)
	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
	   where ORIGDATA = original ethernet header ...
	 */
	u16 metalen = ife_get_sz(skb, ife);
	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
	unsigned int skboff = 0;
	int new_len = skb->len + hdrm;
	bool exceed_mtu = false;
	/* MTU only matters on egress */
	if (!skb_at_tc_ingress(skb)) {
		if (new_len > skb->dev->mtu)
	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	tcf_lastuse_update(&ife->tcf_tm);
	if (!metalen) {		/* no metadata to send */
		/* abuse overlimits to count when we allow packet
		 */
		ife->tcf_qstats.overlimits++;
		spin_unlock(&ife->tcf_lock);
	/* could be stupid policy setup or mtu config
	 * so lets be conservative.. */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);
	ife_meta = ife_encode(skb, metalen);
	/* XXX: we dont have a clever way of telling encode to
	 * not repeat some of the computations that are done by
	 * ops->presence_check...
	 */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (e->ops->encode) {
			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
			/* too corrupt to keep around if overwritten */
			ife->tcf_qstats.drops++;
			spin_unlock(&ife->tcf_lock);
	oethh = (struct ethhdr *)skb->data;
	/* zeroed configured MACs mean "keep the original addresses" */
	if (!is_zero_ether_addr(ife->eth_src))
		ether_addr_copy(oethh->h_source, ife->eth_src);
	if (!is_zero_ether_addr(ife->eth_dst))
		ether_addr_copy(oethh->h_dest, ife->eth_dst);
	oethh->h_proto = htons(ife->eth_type);
	if (skb_at_tc_ingress(skb))
		skb_pull(skb, skb->dev->hard_header_len);
	spin_unlock(&ife->tcf_lock);
767 static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
768 struct tcf_result *res)
770 struct tcf_ife_info *ife = to_ife(a);
772 if (ife->flags & IFE_ENCODE)
773 return tcf_ife_encode(skb, a, res);
775 if (!(ife->flags & IFE_ENCODE))
776 return tcf_ife_decode(skb, a, res);
778 pr_info_ratelimited("unknown failure(policy neither de/encode\n");
779 spin_lock(&ife->tcf_lock);
780 bstats_update(&ife->tcf_bstats, skb);
781 tcf_lastuse_update(&ife->tcf_tm);
782 ife->tcf_qstats.drops++;
783 spin_unlock(&ife->tcf_lock);
788 static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
789 struct netlink_callback *cb, int type,
790 const struct tc_action_ops *ops)
792 struct tc_action_net *tn = net_generic(net, ife_net_id);
794 return tcf_generic_walker(tn, skb, cb, type, ops);
797 static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
799 struct tc_action_net *tn = net_generic(net, ife_net_id);
801 return tcf_idr_search(tn, a, index);
804 static struct tc_action_ops act_ife_ops = {
807 .owner = THIS_MODULE,
809 .dump = tcf_ife_dump,
810 .cleanup = tcf_ife_cleanup,
811 .init = tcf_ife_init,
812 .walk = tcf_ife_walker,
813 .lookup = tcf_ife_search,
814 .size = sizeof(struct tcf_ife_info),
817 static __net_init int ife_init_net(struct net *net)
819 struct tc_action_net *tn = net_generic(net, ife_net_id);
821 return tc_action_net_init(tn, &act_ife_ops);
824 static void __net_exit ife_exit_net(struct net *net)
826 struct tc_action_net *tn = net_generic(net, ife_net_id);
828 tc_action_net_exit(tn);
831 static struct pernet_operations ife_net_ops = {
832 .init = ife_init_net,
833 .exit = ife_exit_net,
835 .size = sizeof(struct tc_action_net),
838 static int __init ife_init_module(void)
840 return tcf_register_action(&act_ife_ops, &ife_net_ops);
843 static void __exit ife_cleanup_module(void)
845 tcf_unregister_action(&act_ife_ops, &ife_net_ops);
848 module_init(ife_init_module);
849 module_exit(ife_cleanup_module);
851 MODULE_AUTHOR("Jamal Hadi Salim(2015)");
852 MODULE_DESCRIPTION("Inter-FE LFB action");
853 MODULE_LICENSE("GPL");