// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/bitfield.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/tc_wrapper.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

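/* fl_flow_key aggregates every flow_dissector key that flower can match on.
 * Both the key dissected from the skb and each filter's key/mask use this
 * layout; keeping it long-aligned (see the __aligned() below) lets the
 * masked-compare helpers walk it one long at a time.
 */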
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
	struct flow_dissector_key_l2tpv3 l2tpv3;
	struct flow_dissector_key_cfm cfm;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

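/* Filters that share the same mask are grouped under one fl_flow_mask and
 * hashed in that mask's own rhashtable. The range records the first and
 * last long-aligned bytes of the mask that are actually non-zero, so
 * lookups only hash and compare the interesting part of the key.
 */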
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	u8 needs_tc_skb_ext:1;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

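/* Helpers for the masked-key range: fl_mask_update_range() scans the mask
 * for its first and last non-zero bytes and rounds them out to long
 * boundaries; fl_mask_range() and fl_key_get_start() then operate only on
 * that window of the key.
 */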
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;

	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

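/* Port-range filters cannot be matched by hashing alone: the packet carries
 * a single port while the filter stores a min/max pair. The helpers below
 * compare the packet's port against the range linearly and, on a match,
 * copy the filter's own masked min/max into mkey so that the subsequent
 * hash lookup can succeed.
 */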
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

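/* Map conntrack info states to the flower CT flags reported by
 * skb_flow_dissect_ct() in fl_classify() below.
 */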
static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

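/* Classification walks the list of masks; for each mask only the relevant
 * parts of the packet are dissected into skb_key, and the masked key is
 * looked up in that mask's hash table. The first match that is not
 * software-skipped wins.
 */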
TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	if (f->needs_tc_skb_ext)
		tc_skb_ext_tc_disable();
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

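/* Hardware offload path: filters are mirrored to the block's offload
 * callbacks (TC_SETUP_CLSFLOWER) for replace, destroy and stats. Whether a
 * filter must, may or must not be offloaded is controlled by the skip_sw /
 * skip_hw bits carried in f->flags.
 */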
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

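/* Filters are looked up by handle in the IDR under RCU; the reference
 * count guards against a concurrent delete dropping the last reference
 * while the handle is being resolved.
 */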
static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;

	f = idr_find(&head->handle_idr, handle);
	return f ? &f->exts : NULL;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

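/* Destroying the head is deferred to a workqueue because
 * rhashtable_destroy() can sleep; the module reference taken in
 * fl_destroy() keeps cls_flower loaded until the deferred work has run.
 */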
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

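/* fl_policy validates the TCA_FLOWER_* netlink attributes used to configure
 * a filter from userspace. As an illustrative example (not taken from this
 * file), a command such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_ip 192.0.2.1 dst_port 80 skip_hw action drop
 *
 * is encoded by iproute2 into TCA_FLOWER_KEY_IP_PROTO,
 * TCA_FLOWER_KEY_IPV4_DST, TCA_FLOWER_KEY_TCP_DST and TCA_FLOWER_FLAGS
 * attributes that are parsed against this table.
 */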
622 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
623 [TCA_FLOWER_UNSPEC] = { .strict_start_type =
624 TCA_FLOWER_L2_MISS },
625 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
626 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
628 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
629 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
630 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
631 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
632 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
633 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
634 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
635 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
636 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
637 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
638 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
639 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
640 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
641 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
642 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
643 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
644 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
645 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
646 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
647 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
648 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
649 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
650 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
651 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
652 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
653 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
654 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
655 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
656 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
657 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
658 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
660 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
661 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
662 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
663 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
664 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
665 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
666 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
667 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
668 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
669 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
670 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
671 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
672 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
673 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
674 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
675 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
676 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
677 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
678 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
679 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
680 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
681 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
682 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
683 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
684 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
685 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
686 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
687 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
688 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
689 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
690 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
693 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
694 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
695 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
696 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
697 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
698 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
699 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
700 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
701 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
702 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
703 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
704 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
705 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
706 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
707 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
708 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
709 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
710 [TCA_FLOWER_KEY_CT_STATE] =
711 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
712 [TCA_FLOWER_KEY_CT_STATE_MASK] =
713 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
714 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
715 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
716 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
717 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
718 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
719 .len = 128 / BITS_PER_BYTE },
720 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
721 .len = 128 / BITS_PER_BYTE },
722 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
723 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
724 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
725 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
726 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
727 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
728 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
729 [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1),
730 [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED },
733 static const struct nla_policy
734 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
735 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
736 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
737 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
738 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
739 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
740 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
743 static const struct nla_policy
744 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
745 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
746 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
747 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
751 static const struct nla_policy
752 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
753 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
756 static const struct nla_policy
757 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
758 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
759 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
760 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
761 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
764 static const struct nla_policy
765 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
766 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 },
767 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
770 static const struct nla_policy
771 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
772 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
773 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
774 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
775 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
776 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
779 static const struct nla_policy cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX] = {
780 [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8,
781 FLOW_DIS_CFM_MDL_MAX),
782 [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 },
785 static void fl_set_key_val(struct nlattr **tb,
786 void *val, int val_type,
787 void *mask, int mask_type, int len)
791 nla_memcpy(val, tb[val_type], len);
792 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
793 memset(mask, 0xff, len);
795 nla_memcpy(mask, tb[mask_type], len);
798 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
799 struct fl_flow_key *mask,
800 struct netlink_ext_ack *extack)
802 fl_set_key_val(tb, &key->tp_range.tp_min.dst,
803 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
804 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
805 fl_set_key_val(tb, &key->tp_range.tp_max.dst,
806 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
807 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
808 fl_set_key_val(tb, &key->tp_range.tp_min.src,
809 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
810 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
811 fl_set_key_val(tb, &key->tp_range.tp_max.src,
812 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
813 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
815 if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
816 NL_SET_ERR_MSG(extack,
817 "Both min and max destination ports must be specified");
820 if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
821 NL_SET_ERR_MSG(extack,
822 "Both min and max source ports must be specified");
825 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
826 ntohs(key->tp_range.tp_max.dst) <=
827 ntohs(key->tp_range.tp_min.dst)) {
828 NL_SET_ERR_MSG_ATTR(extack,
829 tb[TCA_FLOWER_KEY_PORT_DST_MIN],
830 "Invalid destination port range (min must be strictly smaller than max)");
833 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
834 ntohs(key->tp_range.tp_max.src) <=
835 ntohs(key->tp_range.tp_min.src)) {
836 NL_SET_ERR_MSG_ATTR(extack,
837 tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
838 "Invalid source port range (min must be strictly smaller than max)");
845 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
846 struct flow_dissector_key_mpls *key_val,
847 struct flow_dissector_key_mpls *key_mask,
848 struct netlink_ext_ack *extack)
850 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
851 struct flow_dissector_mpls_lse *lse_mask;
852 struct flow_dissector_mpls_lse *lse_val;
857 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
858 mpls_stack_entry_policy, extack);
862 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
863 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
867 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
869 /* LSE depth starts at 1, for consistency with terminology used by
870 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
872 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
873 NL_SET_ERR_MSG_ATTR(extack,
874 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
875 "Invalid MPLS depth");
878 lse_index = depth - 1;
880 dissector_set_mpls_lse(key_val, lse_index);
881 dissector_set_mpls_lse(key_mask, lse_index);
883 lse_val = &key_val->ls[lse_index];
884 lse_mask = &key_mask->ls[lse_index];
886 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
887 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
888 lse_mask->mpls_ttl = MPLS_TTL_MASK;
890 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
891 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
893 if (bos & ~MPLS_BOS_MASK) {
894 NL_SET_ERR_MSG_ATTR(extack,
895 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
896 "Bottom Of Stack (BOS) must be 0 or 1");
899 lse_val->mpls_bos = bos;
900 lse_mask->mpls_bos = MPLS_BOS_MASK;
902 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
903 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
905 if (tc & ~MPLS_TC_MASK) {
906 NL_SET_ERR_MSG_ATTR(extack,
907 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
908 "Traffic Class (TC) must be between 0 and 7");
911 lse_val->mpls_tc = tc;
912 lse_mask->mpls_tc = MPLS_TC_MASK;
914 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
915 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
917 if (label & ~MPLS_LABEL_MASK) {
918 NL_SET_ERR_MSG_ATTR(extack,
919 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
920 "Label must be between 0 and 1048575");
923 lse_val->mpls_label = label;
924 lse_mask->mpls_label = MPLS_LABEL_MASK;
930 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
931 struct flow_dissector_key_mpls *key_val,
932 struct flow_dissector_key_mpls *key_mask,
933 struct netlink_ext_ack *extack)
935 struct nlattr *nla_lse;
939 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
940 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
941 "NLA_F_NESTED is missing");
945 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
946 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
947 NL_SET_ERR_MSG_ATTR(extack, nla_lse,
948 "Invalid MPLS option type");
952 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
957 NL_SET_ERR_MSG(extack,
958 "Bytes leftover after parsing MPLS options");
965 static int fl_set_key_mpls(struct nlattr **tb,
966 struct flow_dissector_key_mpls *key_val,
967 struct flow_dissector_key_mpls *key_mask,
968 struct netlink_ext_ack *extack)
970 struct flow_dissector_mpls_lse *lse_mask;
971 struct flow_dissector_mpls_lse *lse_val;
973 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
974 if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
975 tb[TCA_FLOWER_KEY_MPLS_BOS] ||
976 tb[TCA_FLOWER_KEY_MPLS_TC] ||
977 tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
978 NL_SET_ERR_MSG_ATTR(extack,
979 tb[TCA_FLOWER_KEY_MPLS_OPTS],
980 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
984 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
985 key_val, key_mask, extack);
988 lse_val = &key_val->ls[0];
989 lse_mask = &key_mask->ls[0];
991 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
992 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
993 lse_mask->mpls_ttl = MPLS_TTL_MASK;
994 dissector_set_mpls_lse(key_val, 0);
995 dissector_set_mpls_lse(key_mask, 0);
997 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
998 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
1000 if (bos & ~MPLS_BOS_MASK) {
1001 NL_SET_ERR_MSG_ATTR(extack,
1002 tb[TCA_FLOWER_KEY_MPLS_BOS],
1003 "Bottom Of Stack (BOS) must be 0 or 1");
1006 lse_val->mpls_bos = bos;
1007 lse_mask->mpls_bos = MPLS_BOS_MASK;
1008 dissector_set_mpls_lse(key_val, 0);
1009 dissector_set_mpls_lse(key_mask, 0);
1011 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
1012 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
1014 if (tc & ~MPLS_TC_MASK) {
1015 NL_SET_ERR_MSG_ATTR(extack,
1016 tb[TCA_FLOWER_KEY_MPLS_TC],
1017 "Traffic Class (TC) must be between 0 and 7");
1020 lse_val->mpls_tc = tc;
1021 lse_mask->mpls_tc = MPLS_TC_MASK;
1022 dissector_set_mpls_lse(key_val, 0);
1023 dissector_set_mpls_lse(key_mask, 0);
1025 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1026 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
1028 if (label & ~MPLS_LABEL_MASK) {
1029 NL_SET_ERR_MSG_ATTR(extack,
1030 tb[TCA_FLOWER_KEY_MPLS_LABEL],
1031 "Label must be between 0 and 1048575");
1034 lse_val->mpls_label = label;
1035 lse_mask->mpls_label = MPLS_LABEL_MASK;
1036 dissector_set_mpls_lse(key_val, 0);
1037 dissector_set_mpls_lse(key_mask, 0);
1042 static void fl_set_key_vlan(struct nlattr **tb,
1044 int vlan_id_key, int vlan_prio_key,
1045 int vlan_next_eth_type_key,
1046 struct flow_dissector_key_vlan *key_val,
1047 struct flow_dissector_key_vlan *key_mask)
1049 #define VLAN_PRIORITY_MASK 0x7
1051 if (tb[vlan_id_key]) {
1053 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1054 key_mask->vlan_id = VLAN_VID_MASK;
1056 if (tb[vlan_prio_key]) {
1057 key_val->vlan_priority =
1058 nla_get_u8(tb[vlan_prio_key]) &
1060 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1063 key_val->vlan_tpid = ethertype;
1064 key_mask->vlan_tpid = cpu_to_be16(~0);
1066 if (tb[vlan_next_eth_type_key]) {
1067 key_val->vlan_eth_type =
1068 nla_get_be16(tb[vlan_next_eth_type_key]);
1069 key_mask->vlan_eth_type = cpu_to_be16(~0);
1073 static void fl_set_key_pppoe(struct nlattr **tb,
1074 struct flow_dissector_key_pppoe *key_val,
1075 struct flow_dissector_key_pppoe *key_mask,
1076 struct fl_flow_key *key,
1077 struct fl_flow_key *mask)
1079 /* key_val::type must be set to ETH_P_PPP_SES
1080 * because ETH_P_PPP_SES was stored in basic.n_proto
1081 * which might get overwritten by ppp_proto
1082 * or might be set to 0, the role of key_val::type
1083 * is similar to vlan_key::tpid
1085 key_val->type = htons(ETH_P_PPP_SES);
1086 key_mask->type = cpu_to_be16(~0);
1088 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
1089 key_val->session_id =
1090 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
1091 key_mask->session_id = cpu_to_be16(~0);
1093 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
1094 key_val->ppp_proto =
1095 nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
1096 key_mask->ppp_proto = cpu_to_be16(~0);
1098 if (key_val->ppp_proto == htons(PPP_IP)) {
1099 key->basic.n_proto = htons(ETH_P_IP);
1100 mask->basic.n_proto = cpu_to_be16(~0);
1101 } else if (key_val->ppp_proto == htons(PPP_IPV6)) {
1102 key->basic.n_proto = htons(ETH_P_IPV6);
1103 mask->basic.n_proto = cpu_to_be16(~0);
1104 } else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
1105 key->basic.n_proto = htons(ETH_P_MPLS_UC);
1106 mask->basic.n_proto = cpu_to_be16(~0);
1107 } else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
1108 key->basic.n_proto = htons(ETH_P_MPLS_MC);
1109 mask->basic.n_proto = cpu_to_be16(~0);
1112 key->basic.n_proto = 0;
1113 mask->basic.n_proto = cpu_to_be16(0);
1117 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1118 u32 *dissector_key, u32 *dissector_mask,
1119 u32 flower_flag_bit, u32 dissector_flag_bit)
1121 if (flower_mask & flower_flag_bit) {
1122 *dissector_mask |= dissector_flag_bit;
1123 if (flower_key & flower_flag_bit)
1124 *dissector_key |= dissector_flag_bit;
1128 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1129 u32 *flags_mask, struct netlink_ext_ack *extack)
1133 /* mask is mandatory for flags */
1134 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1135 NL_SET_ERR_MSG(extack, "Missing flags mask");
1139 key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1140 mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1145 fl_set_key_flag(key, mask, flags_key, flags_mask,
1146 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1147 fl_set_key_flag(key, mask, flags_key, flags_mask,
1148 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1149 FLOW_DIS_FIRST_FRAG);
1154 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1155 struct flow_dissector_key_ip *key,
1156 struct flow_dissector_key_ip *mask)
1158 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1159 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1160 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1161 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1163 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1164 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1167 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1168 int depth, int option_len,
1169 struct netlink_ext_ack *extack)
1171 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1172 struct nlattr *class = NULL, *type = NULL, *data = NULL;
1173 struct geneve_opt *opt;
1174 int err, data_len = 0;
1176 if (option_len > sizeof(struct geneve_opt))
1177 data_len = option_len - sizeof(struct geneve_opt);
1179 if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1182 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1183 memset(opt, 0xff, option_len);
1184 opt->length = data_len / 4;
1189 /* If no mask has been prodived we assume an exact match. */
1191 return sizeof(struct geneve_opt) + data_len;
1193 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1194 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1198 err = nla_parse_nested_deprecated(tb,
1199 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1200 nla, geneve_opt_policy, extack);
1204 /* We are not allowed to omit any of CLASS, TYPE or DATA
1205 * fields from the key.
1208 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1209 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1210 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1211 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1215 /* Omitting any of CLASS, TYPE or DATA fields is allowed
1218 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1219 int new_len = key->enc_opts.len;
1221 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1222 data_len = nla_len(data);
1224 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1228 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1232 new_len += sizeof(struct geneve_opt) + data_len;
1233 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1234 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1235 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1238 opt->length = data_len / 4;
1239 memcpy(opt->opt_data, nla_data(data), data_len);
1242 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1243 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1244 opt->opt_class = nla_get_be16(class);
1247 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1248 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1249 opt->type = nla_get_u8(type);
1252 return sizeof(struct geneve_opt) + data_len;
1255 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1256 int depth, int option_len,
1257 struct netlink_ext_ack *extack)
1259 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1260 struct vxlan_metadata *md;
1263 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1264 memset(md, 0xff, sizeof(*md));
1269 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1270 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1274 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1275 vxlan_opt_policy, extack);
1279 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1280 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1284 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1285 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1286 md->gbp &= VXLAN_GBP_MASK;
1292 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1293 int depth, int option_len,
1294 struct netlink_ext_ack *extack)
1296 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1297 struct erspan_metadata *md;
1300 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1301 memset(md, 0xff, sizeof(*md));
1307 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1308 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1312 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1313 erspan_opt_policy, extack);
1317 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1318 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1322 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1323 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1325 if (md->version == 1) {
1326 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1327 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1330 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1331 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1332 memset(&md->u, 0x00, sizeof(md->u));
1333 md->u.index = nla_get_be32(nla);
1335 } else if (md->version == 2) {
1336 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1337 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1338 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1341 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1342 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1343 md->u.md2.dir = nla_get_u8(nla);
1345 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1346 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1347 set_hwid(&md->u.md2, nla_get_u8(nla));
1350 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1357 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1358 int depth, int option_len,
1359 struct netlink_ext_ack *extack)
1361 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1362 struct gtp_pdu_session_info *sinfo;
1363 u8 len = key->enc_opts.len;
1366 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1367 memset(sinfo, 0xff, option_len);
1370 return sizeof(*sinfo);
1372 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1373 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1377 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1378 gtp_opt_policy, extack);
1383 (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1384 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1385 NL_SET_ERR_MSG_MOD(extack,
1386 "Missing tunnel key gtp option pdu type or qfi");
1390 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
1392 nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1394 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1395 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1397 return sizeof(*sinfo);
1400 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1401 struct fl_flow_key *mask,
1402 struct netlink_ext_ack *extack)
1404 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1405 int err, option_len, key_depth, msk_depth = 0;
1407 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1408 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1409 enc_opts_policy, extack);
1413 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1415 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1416 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1417 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1418 enc_opts_policy, extack);
1422 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1423 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1424 if (!nla_ok(nla_opt_msk, msk_depth)) {
1425 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1430 nla_for_each_attr(nla_opt_key, nla_enc_key,
1431 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1432 switch (nla_type(nla_opt_key)) {
1433 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1434 if (key->enc_opts.dst_opt_type &&
1435 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1436 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1440 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1441 option_len = fl_set_geneve_opt(nla_opt_key, key,
1442 key_depth, option_len,
1447 key->enc_opts.len += option_len;
1448 /* At the same time we need to parse through the mask
1449 * in order to verify exact and mask attribute lengths.
1451 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1452 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1453 msk_depth, option_len,
1458 mask->enc_opts.len += option_len;
1459 if (key->enc_opts.len != mask->enc_opts.len) {
1460 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1464 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1465 if (key->enc_opts.dst_opt_type) {
1466 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1470 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1471 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1472 key_depth, option_len,
1477 key->enc_opts.len += option_len;
1478 /* At the same time we need to parse through the mask
1479 * in order to verify exact and mask attribute lengths.
1481 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1482 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1483 msk_depth, option_len,
1488 mask->enc_opts.len += option_len;
1489 if (key->enc_opts.len != mask->enc_opts.len) {
1490 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1494 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1495 if (key->enc_opts.dst_opt_type) {
1496 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1500 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1501 option_len = fl_set_erspan_opt(nla_opt_key, key,
1502 key_depth, option_len,
1507 key->enc_opts.len += option_len;
1508 /* At the same time we need to parse through the mask
1509 * in order to verify exact and mask attribute lengths.
1511 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1512 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1513 msk_depth, option_len,
1518 mask->enc_opts.len += option_len;
1519 if (key->enc_opts.len != mask->enc_opts.len) {
1520 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1524 case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1525 if (key->enc_opts.dst_opt_type) {
1526 NL_SET_ERR_MSG_MOD(extack,
1527 "Duplicate type for gtp options");
1531 key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1532 option_len = fl_set_gtp_opt(nla_opt_key, key,
1533 key_depth, option_len,
1538 key->enc_opts.len += option_len;
1539 /* At the same time we need to parse through the mask
1540 * in order to verify exact and mask attribute lengths.
1542 mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1543 option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1544 msk_depth, option_len,
1549 mask->enc_opts.len += option_len;
1550 if (key->enc_opts.len != mask->enc_opts.len) {
1551 NL_SET_ERR_MSG_MOD(extack,
1552 "Key and mask miss aligned");
1557 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1564 if (!nla_ok(nla_opt_msk, msk_depth)) {
1565 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1568 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1574 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1575 struct netlink_ext_ack *extack)
1577 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1578 NL_SET_ERR_MSG_ATTR(extack, tb,
1579 "no trk, so no other flag can be set");
1583 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1584 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1585 NL_SET_ERR_MSG_ATTR(extack, tb,
1586 "new and est are mutually exclusive");
1590 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1591 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1592 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1593 NL_SET_ERR_MSG_ATTR(extack, tb,
1594 "when inv is set, only trk may be set");
1598 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1599 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1600 NL_SET_ERR_MSG_ATTR(extack, tb,
1601 "new and rpl are mutually exclusive");
1608 static int fl_set_key_ct(struct nlattr **tb,
1609 struct flow_dissector_key_ct *key,
1610 struct flow_dissector_key_ct *mask,
1611 struct netlink_ext_ack *extack)
1613 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1616 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1617 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1620 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1621 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1622 sizeof(key->ct_state));
1624 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1625 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1631 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1632 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1633 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1636 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1637 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1638 sizeof(key->ct_zone));
1640 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1641 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1642 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1645 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1646 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1647 sizeof(key->ct_mark));
1649 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1650 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1651 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1654 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1655 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1656 sizeof(key->ct_labels));
1662 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1663 struct fl_flow_key *key, struct fl_flow_key *mask,
1666 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1670 return good_num_of_vlans;
1673 *ethertype = nla_get_be16(tb);
1674 if (good_num_of_vlans || eth_type_vlan(*ethertype))
1677 key->basic.n_proto = *ethertype;
1678 mask->basic.n_proto = cpu_to_be16(~0);
1682 static void fl_set_key_cfm_md_level(struct nlattr **tb,
1683 struct fl_flow_key *key,
1684 struct fl_flow_key *mask,
1685 struct netlink_ext_ack *extack)
1689 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
1692 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
1693 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
1694 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
1697 static void fl_set_key_cfm_opcode(struct nlattr **tb,
1698 struct fl_flow_key *key,
1699 struct fl_flow_key *mask,
1700 struct netlink_ext_ack *extack)
1702 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
1703 &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
1704 sizeof(key->cfm.opcode));
1707 static int fl_set_key_cfm(struct nlattr **tb,
1708 struct fl_flow_key *key,
1709 struct fl_flow_key *mask,
1710 struct netlink_ext_ack *extack)
1712 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX];
1715 if (!tb[TCA_FLOWER_KEY_CFM])
1718 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
1719 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
1723 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
1724 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);
1729 static int fl_set_key(struct net *net, struct nlattr **tb,
1730 struct fl_flow_key *key, struct fl_flow_key *mask,
1731 struct netlink_ext_ack *extack)
1736 if (tb[TCA_FLOWER_INDEV]) {
1737 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1740 key->meta.ingress_ifindex = err;
1741 mask->meta.ingress_ifindex = 0xffffffff;
1744 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
1745 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
1746 sizeof(key->meta.l2_miss));
1748 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1749 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1750 sizeof(key->eth.dst));
1751 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1752 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1753 sizeof(key->eth.src));
1754 fl_set_key_val(tb, &key->num_of_vlans,
1755 TCA_FLOWER_KEY_NUM_OF_VLANS,
1756 &mask->num_of_vlans,
1758 sizeof(key->num_of_vlans));
1760 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], ðertype, key, mask, 0)) {
1761 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1762 TCA_FLOWER_KEY_VLAN_PRIO,
1763 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1764 &key->vlan, &mask->vlan);
1766 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1767 ðertype, key, mask, 1)) {
1768 fl_set_key_vlan(tb, ethertype,
1769 TCA_FLOWER_KEY_CVLAN_ID,
1770 TCA_FLOWER_KEY_CVLAN_PRIO,
1771 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1772 &key->cvlan, &mask->cvlan);
1773 fl_set_key_val(tb, &key->basic.n_proto,
1774 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1775 &mask->basic.n_proto,
1777 sizeof(key->basic.n_proto));
1781 if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1782 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1784 if (key->basic.n_proto == htons(ETH_P_IP) ||
1785 key->basic.n_proto == htons(ETH_P_IPV6)) {
1786 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1787 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1788 sizeof(key->basic.ip_proto));
1789 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1792 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1793 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1794 mask->control.addr_type = ~0;
1795 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1796 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1797 sizeof(key->ipv4.src));
1798 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1799 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1800 sizeof(key->ipv4.dst));
1801 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1802 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1803 mask->control.addr_type = ~0;
1804 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1805 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1806 sizeof(key->ipv6.src));
1807 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1808 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1809 sizeof(key->ipv6.dst));
1812 if (key->basic.ip_proto == IPPROTO_TCP) {
1813 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1814 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1815 sizeof(key->tp.src));
1816 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1817 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1818 sizeof(key->tp.dst));
1819 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1820 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1821 sizeof(key->tcp.flags));
1822 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1823 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1824 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1825 sizeof(key->tp.src));
1826 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1827 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1828 sizeof(key->tp.dst));
1829 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1830 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1831 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1832 sizeof(key->tp.src));
1833 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1834 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1835 sizeof(key->tp.dst));
1836 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1837 key->basic.ip_proto == IPPROTO_ICMP) {
1838 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1840 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1841 sizeof(key->icmp.type));
1842 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1844 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1845 sizeof(key->icmp.code));
1846 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1847 key->basic.ip_proto == IPPROTO_ICMPV6) {
1848 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1850 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1851 sizeof(key->icmp.type));
1852 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1854 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1855 sizeof(key->icmp.code));
1856 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1857 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1858 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1861 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1862 key->basic.n_proto == htons(ETH_P_RARP)) {
1863 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1864 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1865 sizeof(key->arp.sip));
1866 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1867 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1868 sizeof(key->arp.tip));
1869 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1870 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1871 sizeof(key->arp.op));
1872 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1873 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1874 sizeof(key->arp.sha));
1875 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1876 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1877 sizeof(key->arp.tha));
1878 } else if (key->basic.ip_proto == IPPROTO_L2TP) {
1879 fl_set_key_val(tb, &key->l2tpv3.session_id,
1880 TCA_FLOWER_KEY_L2TPV3_SID,
1881 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1882 sizeof(key->l2tpv3.session_id));
1883 } else if (key->basic.n_proto == htons(ETH_P_CFM)) {
1884 ret = fl_set_key_cfm(tb, key, mask, extack);
1889 if (key->basic.ip_proto == IPPROTO_TCP ||
1890 key->basic.ip_proto == IPPROTO_UDP ||
1891 key->basic.ip_proto == IPPROTO_SCTP) {
1892 ret = fl_set_key_port_range(tb, key, mask, extack);
1897 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1898 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1899 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1900 mask->enc_control.addr_type = ~0;
1901 fl_set_key_val(tb, &key->enc_ipv4.src,
1902 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1903 &mask->enc_ipv4.src,
1904 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1905 sizeof(key->enc_ipv4.src));
1906 fl_set_key_val(tb, &key->enc_ipv4.dst,
1907 TCA_FLOWER_KEY_ENC_IPV4_DST,
1908 &mask->enc_ipv4.dst,
1909 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1910 sizeof(key->enc_ipv4.dst));
1913 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1914 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1915 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1916 mask->enc_control.addr_type = ~0;
1917 fl_set_key_val(tb, &key->enc_ipv6.src,
1918 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1919 &mask->enc_ipv6.src,
1920 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1921 sizeof(key->enc_ipv6.src));
1922 fl_set_key_val(tb, &key->enc_ipv6.dst,
1923 TCA_FLOWER_KEY_ENC_IPV6_DST,
1924 &mask->enc_ipv6.dst,
1925 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1926 sizeof(key->enc_ipv6.dst));
1929 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1930 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1931 sizeof(key->enc_key_id.keyid));
1933 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1934 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1935 sizeof(key->enc_tp.src));
1937 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1938 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1939 sizeof(key->enc_tp.dst));
1941 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1943 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1944 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1945 sizeof(key->hash.hash));
1947 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1948 ret = fl_set_enc_opt(tb, key, mask, extack);
1953 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1957 if (tb[TCA_FLOWER_KEY_FLAGS])
1958 ret = fl_set_key_flags(tb, &key->control.flags,
1959 &mask->control.flags, extack);
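/* fl_set_key() above translates TCA_FLOWER_KEY_* netlink attributes into a
 * key/mask pair.  As a rough illustration (not taken from this file), a
 * rule such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * reaches this function as TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO
 * and TCA_FLOWER_KEY_TCP_DST attributes, each with an all-ones mask.
 */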
1964 static void fl_mask_copy(struct fl_flow_mask *dst,
1965 struct fl_flow_mask *src)
1967 const void *psrc = fl_key_get_start(&src->key, src);
1968 void *pdst = fl_key_get_start(&dst->key, src);
1970 memcpy(pdst, psrc, fl_mask_range(src));
1971 dst->range = src->range;
1974 static const struct rhashtable_params fl_ht_params = {
1975 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1976 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1977 .automatic_shrinking = true,
1980 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1982 mask->filter_ht_params = fl_ht_params;
1983 mask->filter_ht_params.key_len = fl_mask_range(mask);
1984 mask->filter_ht_params.key_offset += mask->range.start;
1986 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
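/* Each mask gets its own filter hashtable that keys filters not on the
 * whole fl_flow_key but only on the byte range covered by the mask:
 * key_offset is advanced to the first masked byte and key_len is set to
 * fl_mask_range(), so lookups hash only the bytes this mask cares about.
 */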
1989 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1990 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1992 #define FL_KEY_IS_MASKED(mask, member) \
1993 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1994 0, FL_KEY_MEMBER_SIZE(member)) \
1996 #define FL_KEY_SET(keys, cnt, id, member) \
1998 keys[cnt].key_id = id; \
1999 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
2003 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
2005 if (FL_KEY_IS_MASKED(mask, member)) \
2006 FL_KEY_SET(keys, cnt, id, member); \
2009 static void fl_init_dissector(struct flow_dissector *dissector,
2010 struct fl_flow_key *mask)
2012 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
2015 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2016 FLOW_DISSECTOR_KEY_META, meta);
2017 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
2018 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
2019 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2020 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
2021 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2022 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
2023 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2024 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
2025 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2026 FLOW_DISSECTOR_KEY_PORTS, tp);
2027 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2028 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
2029 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2030 FLOW_DISSECTOR_KEY_IP, ip);
2031 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2032 FLOW_DISSECTOR_KEY_TCP, tcp);
2033 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2034 FLOW_DISSECTOR_KEY_ICMP, icmp);
2035 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2036 FLOW_DISSECTOR_KEY_ARP, arp);
2037 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2038 FLOW_DISSECTOR_KEY_MPLS, mpls);
2039 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2040 FLOW_DISSECTOR_KEY_VLAN, vlan);
2041 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2042 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
2043 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2044 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
2045 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2046 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
2047 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2048 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
2049 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
2050 FL_KEY_IS_MASKED(mask, enc_ipv6))
2051 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2053 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2054 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
2055 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2056 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
2057 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2058 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
2059 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2060 FLOW_DISSECTOR_KEY_CT, ct);
2061 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2062 FLOW_DISSECTOR_KEY_HASH, hash);
2063 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2064 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
2065 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2066 FLOW_DISSECTOR_KEY_PPPOE, pppoe);
2067 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2068 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
2069 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2070 FLOW_DISSECTOR_KEY_CFM, cfm);
2072 skb_flow_dissector_init(dissector, keys, cnt);
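/* Only members with at least one non-zero mask byte are added to the
 * dissector (FL_KEY_IS_MASKED() uses memchr_inv()), so packet dissection
 * extracts just the fields this mask can match on.  CONTROL and BASIC are
 * always included.
 */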
2075 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
2076 struct fl_flow_mask *mask)
2078 struct fl_flow_mask *newmask;
2081 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2082 if (!newmask)
2083 return ERR_PTR(-ENOMEM);
2085 fl_mask_copy(newmask, mask);
2087 if ((newmask->key.tp_range.tp_min.dst &&
2088 newmask->key.tp_range.tp_max.dst) ||
2089 (newmask->key.tp_range.tp_min.src &&
2090 newmask->key.tp_range.tp_max.src))
2091 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2093 err = fl_init_mask_hashtable(newmask);
2097 fl_init_dissector(&newmask->dissector, &newmask->key);
2099 INIT_LIST_HEAD_RCU(&newmask->filters);
2101 refcount_set(&newmask->refcnt, 1);
2102 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2103 &newmask->ht_node, mask_ht_params);
2104 if (err)
2105 goto errout_destroy;
2107 spin_lock(&head->masks_lock);
2108 list_add_tail_rcu(&newmask->list, &head->masks);
2109 spin_unlock(&head->masks_lock);
2114 rhashtable_destroy(&newmask->ht);
2118 return ERR_PTR(err);
2121 static int fl_check_assign_mask(struct cls_fl_head *head,
2122 struct cls_fl_filter *fnew,
2123 struct cls_fl_filter *fold,
2124 struct fl_flow_mask *mask)
2126 struct fl_flow_mask *newmask;
2131 /* Insert mask as temporary node to prevent concurrent creation of mask
2132 * with same key. Any concurrent lookups with same key will return
2133 * -EAGAIN because mask's refcnt is zero.
2134 */
2135 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2143 goto errout_cleanup;
2146 newmask = fl_create_new_mask(head, mask);
2147 if (IS_ERR(newmask)) {
2148 ret = PTR_ERR(newmask);
2149 goto errout_cleanup;
2152 fnew->mask = newmask;
2154 } else if (IS_ERR(fnew->mask)) {
2155 ret = PTR_ERR(fnew->mask);
2156 } else if (fold && fold->mask != fnew->mask) {
2158 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2159 /* Mask was deleted concurrently, try again */
2166 rhashtable_remove_fast(&head->ht, &mask->ht_node,
2167 mask_ht_params);
2168 return ret;
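/* Masks are shared between filters: the lookup above finds an existing
 * fl_flow_mask with identical contents, and fl_create_new_mask() is only
 * called when this filter is the first user of the mask.  The temporary
 * zero-refcnt node acts as a reservation, so concurrent insertions of the
 * same mask back off with -EAGAIN and retry.
 */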
2171 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
2173 return mask->meta.l2_miss;
2176 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2177 struct cls_fl_filter *f, struct fl_flow_mask *mask,
2178 unsigned long base, struct nlattr **tb,
2180 struct fl_flow_tmplt *tmplt,
2181 u32 flags, u32 fl_flags,
2182 struct netlink_ext_ack *extack)
2186 err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2191 if (tb[TCA_FLOWER_CLASSID]) {
2192 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2193 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2195 tcf_bind_filter(tp, &f->res, base);
2196 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2200 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2204 fl_mask_update_range(mask);
2205 fl_set_masked_key(&f->mkey, &f->key, mask);
2207 if (!fl_mask_fits_tmplt(tmplt, mask)) {
2208 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2212 /* Enable tc skb extension if filter matches on data extracted from
2213 * this extension (currently the L2 miss indication).
2214 */
2215 if (fl_needs_tc_skb_ext(&mask->key)) {
2216 f->needs_tc_skb_ext = 1;
2217 tc_skb_ext_tc_enable();
2223 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2224 struct cls_fl_filter *fold,
2227 struct fl_flow_mask *mask = fnew->mask;
2230 err = rhashtable_lookup_insert_fast(&mask->ht,
2232 mask->filter_ht_params);
2235 /* It is okay if filter with same key exists when
2236 * overwriting an existing filter (fold is set).
2237 */
2238 return fold && err == -EEXIST ? 0 : err;
2245 static int fl_change(struct net *net, struct sk_buff *in_skb,
2246 struct tcf_proto *tp, unsigned long base,
2247 u32 handle, struct nlattr **tca,
2248 void **arg, u32 flags,
2249 struct netlink_ext_ack *extack)
2251 struct cls_fl_head *head = fl_head_dereference(tp);
2252 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2253 struct cls_fl_filter *fold = *arg;
2254 struct cls_fl_filter *fnew;
2255 struct fl_flow_mask *mask;
2260 if (!tca[TCA_OPTIONS]) {
2265 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2271 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2274 goto errout_mask_alloc;
2277 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2278 tca[TCA_OPTIONS], fl_policy, NULL);
2282 if (fold && handle && fold->handle != handle) {
2287 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2292 INIT_LIST_HEAD(&fnew->hw_list);
2293 refcount_set(&fnew->refcnt, 1);
2295 if (tb[TCA_FLOWER_FLAGS]) {
2296 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2298 if (!tc_flags_valid(fnew->flags)) {
2306 spin_lock(&tp->lock);
2309 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2310 INT_MAX, GFP_ATOMIC);
2312 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2313 handle, GFP_ATOMIC);
2315 /* Filter with specified handle was concurrently
2316 * inserted after initial check in cls_api. This is not
2317 * necessarily an error if NLM_F_EXCL is not set in
2318 * message flags. Returning EAGAIN will cause cls_api to
2319 * try to update concurrently inserted rule.
2320 */
2321 if (err == -ENOSPC)
2322 err = -EAGAIN;
2323 }
2324 spin_unlock(&tp->lock);
2331 fnew->handle = handle;
2333 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2334 !tc_skip_hw(fnew->flags));
2338 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2339 tp->chain->tmplt_priv, flags, fnew->flags,
2344 err = fl_check_assign_mask(head, fnew, fold, mask);
2348 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2352 if (!tc_skip_hw(fnew->flags)) {
2353 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2358 if (!tc_in_hw(fnew->flags))
2359 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2361 spin_lock(&tp->lock);
2363 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2364 * proto again or create new one, if necessary.
2365 */
2366 if (tp->deleting) {
2367 err = -EAGAIN;
2368 goto errout_hw;
2369 }
2371 if (fold) {
2372 /* Fold filter was deleted concurrently. Retry lookup. */
2373 if (fold->deleted) {
2378 fnew->handle = handle;
2381 struct rhashtable_params params =
2382 fnew->mask->filter_ht_params;
2384 err = rhashtable_insert_fast(&fnew->mask->ht,
2392 refcount_inc(&fnew->refcnt);
2393 rhashtable_remove_fast(&fold->mask->ht,
2395 fold->mask->filter_ht_params);
2396 idr_replace(&head->handle_idr, fnew, fnew->handle);
2397 list_replace_rcu(&fold->list, &fnew->list);
2398 fold->deleted = true;
2400 spin_unlock(&tp->lock);
2402 fl_mask_put(head, fold->mask);
2403 if (!tc_skip_hw(fold->flags))
2404 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2405 tcf_unbind_filter(tp, &fold->res);
2406 /* Caller holds reference to fold, so refcnt is always > 0
2407 * after this.
2408 */
2409 refcount_dec(&fold->refcnt);
2412 idr_replace(&head->handle_idr, fnew, fnew->handle);
2414 refcount_inc(&fnew->refcnt);
2415 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2416 spin_unlock(&tp->lock);
2422 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2426 spin_lock(&tp->lock);
2428 fnew->deleted = true;
2429 spin_unlock(&tp->lock);
2430 if (!tc_skip_hw(fnew->flags))
2431 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2433 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2434 fnew->mask->filter_ht_params);
2436 fl_mask_put(head, fnew->mask);
2439 idr_remove(&head->handle_idr, fnew->handle);
2444 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
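/* fl_change() publication order: the handle is first reserved in the IDR
 * with a NULL pointer, the mask is assigned and the filter inserted into
 * the mask's hashtable, hardware offload is attempted without tp->lock,
 * and only then is the filter made visible via idr_replace() and
 * list_add_tail_rcu()/list_replace_rcu() under tp->lock, after re-checking
 * that neither tp nor the old filter was concurrently deleted.
 */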
2451 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2452 bool rtnl_held, struct netlink_ext_ack *extack)
2454 struct cls_fl_head *head = fl_head_dereference(tp);
2455 struct cls_fl_filter *f = arg;
2459 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2460 *last = list_empty(&head->masks);
2466 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2469 struct cls_fl_head *head = fl_head_dereference(tp);
2470 unsigned long id = arg->cookie, tmp;
2471 struct cls_fl_filter *f;
2473 arg->count = arg->skip;
2476 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2477 /* don't return filters that are being deleted */
2478 if (!f || !refcount_inc_not_zero(&f->refcnt))
2482 if (arg->fn(tp, f, arg) < 0) {
2496 static struct cls_fl_filter *
2497 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2499 struct cls_fl_head *head = fl_head_dereference(tp);
2501 spin_lock(&tp->lock);
2502 if (list_empty(&head->hw_filters)) {
2503 spin_unlock(&tp->lock);
2507 if (!f)
2508 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2509 hw_list);
2510 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2511 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2512 spin_unlock(&tp->lock);
2517 spin_unlock(&tp->lock);
2521 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2522 void *cb_priv, struct netlink_ext_ack *extack)
2524 struct tcf_block *block = tp->chain->block;
2525 struct flow_cls_offload cls_flower = {};
2526 struct cls_fl_filter *f = NULL;
2529 /* hw_filters list can only be changed by hw offload functions after
2530 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2531 * in progress.
2532 */
2533 ASSERT_RTNL();
2535 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2537 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2538 if (!cls_flower.rule) {
2543 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2545 cls_flower.command = add ?
2546 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2547 cls_flower.cookie = (unsigned long)f;
2548 cls_flower.rule->match.dissector = &f->mask->dissector;
2549 cls_flower.rule->match.mask = &f->mask->key;
2550 cls_flower.rule->match.key = &f->mkey;
2552 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2553 cls_flower.common.extack);
2555 kfree(cls_flower.rule);
2556 if (tc_skip_sw(f->flags)) {
2563 cls_flower.classid = f->res.classid;
2565 err = tc_setup_cb_reoffload(block, tp, add, cb,
2566 TC_SETUP_CLSFLOWER, &cls_flower,
2569 tc_cleanup_offload_action(&cls_flower.rule->action);
2570 kfree(cls_flower.rule);
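/* fl_reoffload() walks the hw_filters list and replays every filter to the
 * given block callback: FLOW_CLS_REPLACE when the callback is being added,
 * FLOW_CLS_DESTROY when it is being removed, so drivers that bind or unbind
 * after filters already exist still see a consistent rule set.
 */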
2583 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2585 struct flow_cls_offload *cls_flower = type_data;
2586 struct cls_fl_filter *f =
2587 (struct cls_fl_filter *) cls_flower->cookie;
2588 struct cls_fl_head *head = fl_head_dereference(tp);
2590 spin_lock(&tp->lock);
2591 list_add(&f->hw_list, &head->hw_filters);
2592 spin_unlock(&tp->lock);
2595 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2597 struct flow_cls_offload *cls_flower = type_data;
2598 struct cls_fl_filter *f =
2599 (struct cls_fl_filter *) cls_flower->cookie;
2601 spin_lock(&tp->lock);
2602 if (!list_empty(&f->hw_list))
2603 list_del_init(&f->hw_list);
2604 spin_unlock(&tp->lock);
2607 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2608 struct fl_flow_tmplt *tmplt)
2610 struct flow_cls_offload cls_flower = {};
2611 struct tcf_block *block = chain->block;
2613 cls_flower.rule = flow_rule_alloc(0);
2614 if (!cls_flower.rule)
2617 cls_flower.common.chain_index = chain->index;
2618 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2619 cls_flower.cookie = (unsigned long) tmplt;
2620 cls_flower.rule->match.dissector = &tmplt->dissector;
2621 cls_flower.rule->match.mask = &tmplt->mask;
2622 cls_flower.rule->match.key = &tmplt->dummy_key;
2624 /* We don't care if driver (any of them) fails to handle this
2625 * call. It serves just as a hint for it.
2626 */
2627 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2628 kfree(cls_flower.rule);
2633 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2634 struct fl_flow_tmplt *tmplt)
2636 struct flow_cls_offload cls_flower = {};
2637 struct tcf_block *block = chain->block;
2639 cls_flower.common.chain_index = chain->index;
2640 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2641 cls_flower.cookie = (unsigned long) tmplt;
2643 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2646 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2647 struct nlattr **tca,
2648 struct netlink_ext_ack *extack)
2650 struct fl_flow_tmplt *tmplt;
2654 if (!tca[TCA_OPTIONS])
2655 return ERR_PTR(-EINVAL);
2657 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2659 return ERR_PTR(-ENOBUFS);
2660 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2661 tca[TCA_OPTIONS], fl_policy, NULL);
2665 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2670 tmplt->chain = chain;
2671 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2675 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2677 err = fl_hw_create_tmplt(chain, tmplt);
2688 return ERR_PTR(err);
2691 static void fl_tmplt_destroy(void *tmplt_priv)
2693 struct fl_flow_tmplt *tmplt = tmplt_priv;
2695 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
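/* Chain templates: fl_tmplt_create() parses a key/mask exactly like a
 * filter, builds a dissector for it and offers it to drivers as a hint via
 * FLOW_CLS_TMPLT_CREATE.  Filters later added to the chain must fit the
 * template mask, see the fl_mask_fits_tmplt() check in fl_set_parms().
 */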
2699 static int fl_dump_key_val(struct sk_buff *skb,
2700 void *val, int val_type,
2701 void *mask, int mask_type, int len)
2705 if (!memchr_inv(mask, 0, len))
2706 return 0;
2707 err = nla_put(skb, val_type, len, val);
2708 if (err)
2709 return err;
2710 if (mask_type != TCA_FLOWER_UNSPEC) {
2711 err = nla_put(skb, mask_type, len, mask);
2718 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2719 struct fl_flow_key *mask)
2721 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2722 TCA_FLOWER_KEY_PORT_DST_MIN,
2723 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2724 sizeof(key->tp_range.tp_min.dst)) ||
2725 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2726 TCA_FLOWER_KEY_PORT_DST_MAX,
2727 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2728 sizeof(key->tp_range.tp_max.dst)) ||
2729 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2730 TCA_FLOWER_KEY_PORT_SRC_MIN,
2731 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2732 sizeof(key->tp_range.tp_min.src)) ||
2733 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2734 TCA_FLOWER_KEY_PORT_SRC_MAX,
2735 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2736 sizeof(key->tp_range.tp_max.src)))
2742 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2743 struct flow_dissector_key_mpls *mpls_key,
2744 struct flow_dissector_key_mpls *mpls_mask,
2747 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2748 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2751 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2752 lse_index + 1);
2753 if (err)
2754 return err;
2756 if (lse_mask->mpls_ttl) {
2757 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2762 if (lse_mask->mpls_bos) {
2763 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2768 if (lse_mask->mpls_tc) {
2769 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2774 if (lse_mask->mpls_label) {
2775 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2776 lse_key->mpls_label);
2784 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2785 struct flow_dissector_key_mpls *mpls_key,
2786 struct flow_dissector_key_mpls *mpls_mask)
2788 struct nlattr *opts;
2793 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2797 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2798 if (!(mpls_mask->used_lses & 1 << lse_index))
2801 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2807 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2811 nla_nest_end(skb, lse);
2813 nla_nest_end(skb, opts);
2818 nla_nest_cancel(skb, lse);
2820 nla_nest_cancel(skb, opts);
2825 static int fl_dump_key_mpls(struct sk_buff *skb,
2826 struct flow_dissector_key_mpls *mpls_key,
2827 struct flow_dissector_key_mpls *mpls_mask)
2829 struct flow_dissector_mpls_lse *lse_mask;
2830 struct flow_dissector_mpls_lse *lse_key;
2833 if (!mpls_mask->used_lses)
2836 lse_mask = &mpls_mask->ls[0];
2837 lse_key = &mpls_key->ls[0];
2839 /* For backward compatibility, don't use the MPLS nested attributes if
2840 * the rule can be expressed using the old attributes.
2841 */
2842 if (mpls_mask->used_lses & ~1 ||
2843 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2844 !lse_mask->mpls_tc && !lse_mask->mpls_label))
2845 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2847 if (lse_mask->mpls_ttl) {
2848 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2853 if (lse_mask->mpls_tc) {
2854 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2859 if (lse_mask->mpls_label) {
2860 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2861 lse_key->mpls_label);
2865 if (lse_mask->mpls_bos) {
2866 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2874 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2875 struct flow_dissector_key_ip *key,
2876 struct flow_dissector_key_ip *mask)
2878 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2879 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2880 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2881 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2883 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2884 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2890 static int fl_dump_key_vlan(struct sk_buff *skb,
2891 int vlan_id_key, int vlan_prio_key,
2892 struct flow_dissector_key_vlan *vlan_key,
2893 struct flow_dissector_key_vlan *vlan_mask)
2897 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2899 if (vlan_mask->vlan_id) {
2900 err = nla_put_u16(skb, vlan_id_key,
2905 if (vlan_mask->vlan_priority) {
2906 err = nla_put_u8(skb, vlan_prio_key,
2907 vlan_key->vlan_priority);
2914 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2915 u32 *flower_key, u32 *flower_mask,
2916 u32 flower_flag_bit, u32 dissector_flag_bit)
2918 if (dissector_mask & dissector_flag_bit) {
2919 *flower_mask |= flower_flag_bit;
2920 if (dissector_key & dissector_flag_bit)
2921 *flower_key |= flower_flag_bit;
2925 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2931 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2937 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2938 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2939 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2940 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2941 FLOW_DIS_FIRST_FRAG);
2943 _key = cpu_to_be32(key);
2944 _mask = cpu_to_be32(mask);
2946 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2950 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2953 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2954 struct flow_dissector_key_enc_opts *enc_opts)
2956 struct geneve_opt *opt;
2957 struct nlattr *nest;
2960 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2962 goto nla_put_failure;
2964 while (enc_opts->len > opt_off) {
2965 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2967 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2969 goto nla_put_failure;
2970 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2972 goto nla_put_failure;
2973 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2974 opt->length * 4, opt->opt_data))
2975 goto nla_put_failure;
2977 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2979 nla_nest_end(skb, nest);
2983 nla_nest_cancel(skb, nest);
2987 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2988 struct flow_dissector_key_enc_opts *enc_opts)
2990 struct vxlan_metadata *md;
2991 struct nlattr *nest;
2993 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2995 goto nla_put_failure;
2997 md = (struct vxlan_metadata *)&enc_opts->data[0];
2998 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2999 goto nla_put_failure;
3001 nla_nest_end(skb, nest);
3005 nla_nest_cancel(skb, nest);
3009 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
3010 struct flow_dissector_key_enc_opts *enc_opts)
3012 struct erspan_metadata *md;
3013 struct nlattr *nest;
3015 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
3017 goto nla_put_failure;
3019 md = (struct erspan_metadata *)&enc_opts->data[0];
3020 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
3021 goto nla_put_failure;
3023 if (md->version == 1 &&
3024 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
3025 goto nla_put_failure;
3027 if (md->version == 2 &&
3028 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
3030 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
3031 get_hwid(&md->u.md2))))
3032 goto nla_put_failure;
3034 nla_nest_end(skb, nest);
3038 nla_nest_cancel(skb, nest);
3042 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
3043 struct flow_dissector_key_enc_opts *enc_opts)
3046 struct gtp_pdu_session_info *session_info;
3047 struct nlattr *nest;
3049 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
3051 goto nla_put_failure;
3053 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
3055 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
3056 session_info->pdu_type))
3057 goto nla_put_failure;
3059 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
3060 goto nla_put_failure;
3062 nla_nest_end(skb, nest);
3066 nla_nest_cancel(skb, nest);
3070 static int fl_dump_key_ct(struct sk_buff *skb,
3071 struct flow_dissector_key_ct *key,
3072 struct flow_dissector_key_ct *mask)
3074 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
3075 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
3076 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
3077 sizeof(key->ct_state)))
3078 goto nla_put_failure;
3080 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
3081 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
3082 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
3083 sizeof(key->ct_zone)))
3084 goto nla_put_failure;
3086 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
3087 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
3088 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
3089 sizeof(key->ct_mark)))
3090 goto nla_put_failure;
3092 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3093 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3094 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3095 sizeof(key->ct_labels)))
3096 goto nla_put_failure;
3104 static int fl_dump_key_cfm(struct sk_buff *skb,
3105 struct flow_dissector_key_cfm *key,
3106 struct flow_dissector_key_cfm *mask)
3108 struct nlattr *opts;
3112 if (!memchr_inv(mask, 0, sizeof(*mask)))
3115 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
3119 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
3120 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
3121 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
3127 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
3132 nla_nest_end(skb, opts);
3137 nla_nest_cancel(skb, opts);
3141 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3142 struct flow_dissector_key_enc_opts *enc_opts)
3144 struct nlattr *nest;
3150 nest = nla_nest_start_noflag(skb, enc_opt_type);
3152 goto nla_put_failure;
3154 switch (enc_opts->dst_opt_type) {
3155 case TUNNEL_GENEVE_OPT:
3156 err = fl_dump_key_geneve_opt(skb, enc_opts);
3158 goto nla_put_failure;
3160 case TUNNEL_VXLAN_OPT:
3161 err = fl_dump_key_vxlan_opt(skb, enc_opts);
3163 goto nla_put_failure;
3165 case TUNNEL_ERSPAN_OPT:
3166 err = fl_dump_key_erspan_opt(skb, enc_opts);
3168 goto nla_put_failure;
3170 case TUNNEL_GTP_OPT:
3171 err = fl_dump_key_gtp_opt(skb, enc_opts);
3173 goto nla_put_failure;
3176 goto nla_put_failure;
3178 nla_nest_end(skb, nest);
3182 nla_nest_cancel(skb, nest);
3186 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3187 struct flow_dissector_key_enc_opts *key_opts,
3188 struct flow_dissector_key_enc_opts *msk_opts)
3192 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3196 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3199 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3200 struct fl_flow_key *key, struct fl_flow_key *mask)
3202 if (mask->meta.ingress_ifindex) {
3203 struct net_device *dev;
3205 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3206 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3207 goto nla_put_failure;
3210 if (fl_dump_key_val(skb, &key->meta.l2_miss,
3211 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
3212 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
3213 goto nla_put_failure;
3215 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3216 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3217 sizeof(key->eth.dst)) ||
3218 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3219 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3220 sizeof(key->eth.src)) ||
3221 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3222 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3223 sizeof(key->basic.n_proto)))
3224 goto nla_put_failure;
3226 if (mask->num_of_vlans.num_of_vlans) {
3227 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3228 goto nla_put_failure;
3231 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3232 goto nla_put_failure;
3234 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3235 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3236 goto nla_put_failure;
3238 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3239 TCA_FLOWER_KEY_CVLAN_PRIO,
3240 &key->cvlan, &mask->cvlan) ||
3241 (mask->cvlan.vlan_tpid &&
3242 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3243 key->cvlan.vlan_tpid)))
3244 goto nla_put_failure;
3246 if (mask->basic.n_proto) {
3247 if (mask->cvlan.vlan_eth_type) {
3248 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3249 key->basic.n_proto))
3250 goto nla_put_failure;
3251 } else if (mask->vlan.vlan_eth_type) {
3252 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3253 key->vlan.vlan_eth_type))
3254 goto nla_put_failure;
3258 if ((key->basic.n_proto == htons(ETH_P_IP) ||
3259 key->basic.n_proto == htons(ETH_P_IPV6)) &&
3260 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3261 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3262 sizeof(key->basic.ip_proto)) ||
3263 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3264 goto nla_put_failure;
3266 if (mask->pppoe.session_id) {
3267 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3268 key->pppoe.session_id))
3269 goto nla_put_failure;
3271 if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3272 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3273 key->pppoe.ppp_proto))
3274 goto nla_put_failure;
3277 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3278 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3279 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3280 sizeof(key->ipv4.src)) ||
3281 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3282 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3283 sizeof(key->ipv4.dst))))
3284 goto nla_put_failure;
3285 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3286 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3287 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3288 sizeof(key->ipv6.src)) ||
3289 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3290 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3291 sizeof(key->ipv6.dst))))
3292 goto nla_put_failure;
3294 if (key->basic.ip_proto == IPPROTO_TCP &&
3295 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3296 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3297 sizeof(key->tp.src)) ||
3298 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3299 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3300 sizeof(key->tp.dst)) ||
3301 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3302 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3303 sizeof(key->tcp.flags))))
3304 goto nla_put_failure;
3305 else if (key->basic.ip_proto == IPPROTO_UDP &&
3306 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3307 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3308 sizeof(key->tp.src)) ||
3309 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3310 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3311 sizeof(key->tp.dst))))
3312 goto nla_put_failure;
3313 else if (key->basic.ip_proto == IPPROTO_SCTP &&
3314 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3315 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3316 sizeof(key->tp.src)) ||
3317 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3318 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3319 sizeof(key->tp.dst))))
3320 goto nla_put_failure;
3321 else if (key->basic.n_proto == htons(ETH_P_IP) &&
3322 key->basic.ip_proto == IPPROTO_ICMP &&
3323 (fl_dump_key_val(skb, &key->icmp.type,
3324 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3325 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3326 sizeof(key->icmp.type)) ||
3327 fl_dump_key_val(skb, &key->icmp.code,
3328 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3329 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3330 sizeof(key->icmp.code))))
3331 goto nla_put_failure;
3332 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3333 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3334 (fl_dump_key_val(skb, &key->icmp.type,
3335 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3336 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3337 sizeof(key->icmp.type)) ||
3338 fl_dump_key_val(skb, &key->icmp.code,
3339 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3340 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3341 sizeof(key->icmp.code))))
3342 goto nla_put_failure;
3343 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3344 key->basic.n_proto == htons(ETH_P_RARP)) &&
3345 (fl_dump_key_val(skb, &key->arp.sip,
3346 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3347 TCA_FLOWER_KEY_ARP_SIP_MASK,
3348 sizeof(key->arp.sip)) ||
3349 fl_dump_key_val(skb, &key->arp.tip,
3350 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3351 TCA_FLOWER_KEY_ARP_TIP_MASK,
3352 sizeof(key->arp.tip)) ||
3353 fl_dump_key_val(skb, &key->arp.op,
3354 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3355 TCA_FLOWER_KEY_ARP_OP_MASK,
3356 sizeof(key->arp.op)) ||
3357 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3358 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3359 sizeof(key->arp.sha)) ||
3360 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3361 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3362 sizeof(key->arp.tha))))
3363 goto nla_put_failure;
3364 else if (key->basic.ip_proto == IPPROTO_L2TP &&
3365 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3366 TCA_FLOWER_KEY_L2TPV3_SID,
3367 &mask->l2tpv3.session_id,
3369 sizeof(key->l2tpv3.session_id)))
3370 goto nla_put_failure;
3372 if ((key->basic.ip_proto == IPPROTO_TCP ||
3373 key->basic.ip_proto == IPPROTO_UDP ||
3374 key->basic.ip_proto == IPPROTO_SCTP) &&
3375 fl_dump_key_port_range(skb, key, mask))
3376 goto nla_put_failure;
3378 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3379 (fl_dump_key_val(skb, &key->enc_ipv4.src,
3380 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3381 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3382 sizeof(key->enc_ipv4.src)) ||
3383 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3384 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3385 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3386 sizeof(key->enc_ipv4.dst))))
3387 goto nla_put_failure;
3388 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3389 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3390 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3391 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3392 sizeof(key->enc_ipv6.src)) ||
3393 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3394 TCA_FLOWER_KEY_ENC_IPV6_DST,
3395 &mask->enc_ipv6.dst,
3396 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3397 sizeof(key->enc_ipv6.dst))))
3398 goto nla_put_failure;
3400 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3401 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3402 sizeof(key->enc_key_id)) ||
3403 fl_dump_key_val(skb, &key->enc_tp.src,
3404 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3405 &mask->enc_tp.src,
3406 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3407 sizeof(key->enc_tp.src)) ||
3408 fl_dump_key_val(skb, &key->enc_tp.dst,
3409 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3410 &mask->enc_tp.dst,
3411 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3412 sizeof(key->enc_tp.dst)) ||
3413 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3414 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3415 goto nla_put_failure;
3417 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3418 goto nla_put_failure;
3420 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3421 goto nla_put_failure;
3423 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3424 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3425 sizeof(key->hash.hash)))
3426 goto nla_put_failure;
3428 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
3429 goto nla_put_failure;
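/* fl_dump_key() is the inverse of fl_set_key(): an attribute is emitted
 * only when the corresponding mask is non-zero (fl_dump_key_val() starts
 * with a memchr_inv() check), so the dump reflects what was configured
 * rather than the whole fl_flow_key.
 */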
3437 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3438 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3440 struct cls_fl_filter *f = fh;
3441 struct nlattr *nest;
3442 struct fl_flow_key *key, *mask;
3448 t->tcm_handle = f->handle;
3450 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3452 goto nla_put_failure;
3454 spin_lock(&tp->lock);
3456 if (f->res.classid &&
3457 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3458 goto nla_put_failure_locked;
3461 mask = &f->mask->key;
3462 skip_hw = tc_skip_hw(f->flags);
3464 if (fl_dump_key(skb, net, key, mask))
3465 goto nla_put_failure_locked;
3467 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3468 goto nla_put_failure_locked;
3470 spin_unlock(&tp->lock);
3473 fl_hw_update_stats(tp, f, rtnl_held);
3475 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3476 goto nla_put_failure;
3478 if (tcf_exts_dump(skb, &f->exts))
3479 goto nla_put_failure;
3481 nla_nest_end(skb, nest);
3483 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3484 goto nla_put_failure;
3488 nla_put_failure_locked:
3489 spin_unlock(&tp->lock);
3491 nla_nest_cancel(skb, nest);
3495 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3496 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3498 struct cls_fl_filter *f = fh;
3499 struct nlattr *nest;
3505 t->tcm_handle = f->handle;
3507 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3509 goto nla_put_failure;
3511 spin_lock(&tp->lock);
3513 skip_hw = tc_skip_hw(f->flags);
3515 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3516 goto nla_put_failure_locked;
3518 spin_unlock(&tp->lock);
3521 fl_hw_update_stats(tp, f, rtnl_held);
3523 if (tcf_exts_terse_dump(skb, &f->exts))
3524 goto nla_put_failure;
3526 nla_nest_end(skb, nest);
3530 nla_put_failure_locked:
3531 spin_unlock(&tp->lock);
3533 nla_nest_cancel(skb, nest);
3537 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3539 struct fl_flow_tmplt *tmplt = tmplt_priv;
3540 struct fl_flow_key *key, *mask;
3541 struct nlattr *nest;
3543 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3545 goto nla_put_failure;
3547 key = &tmplt->dummy_key;
3548 mask = &tmplt->mask;
3550 if (fl_dump_key(skb, net, key, mask))
3551 goto nla_put_failure;
3553 nla_nest_end(skb, nest);
3558 nla_nest_cancel(skb, nest);
3562 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3565 struct cls_fl_filter *f = fh;
3567 tc_cls_bind_class(classid, cl, q, &f->res, base);
3570 static bool fl_delete_empty(struct tcf_proto *tp)
3572 struct cls_fl_head *head = fl_head_dereference(tp);
3574 spin_lock(&tp->lock);
3575 tp->deleting = idr_is_empty(&head->handle_idr);
3576 spin_unlock(&tp->lock);
3578 return tp->deleting;
3581 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3582 .kind = "flower",
3583 .classify = fl_classify,
3585 .destroy = fl_destroy,
3588 .change = fl_change,
3589 .delete = fl_delete,
3590 .delete_empty = fl_delete_empty,
3592 .reoffload = fl_reoffload,
3593 .hw_add = fl_hw_add,
3594 .hw_del = fl_hw_del,
3596 .terse_dump = fl_terse_dump,
3597 .bind_class = fl_bind_class,
3598 .tmplt_create = fl_tmplt_create,
3599 .tmplt_destroy = fl_tmplt_destroy,
3600 .tmplt_dump = fl_tmplt_dump,
3601 .get_exts = fl_get_exts,
3602 .owner = THIS_MODULE,
3603 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
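/* TCF_PROTO_OPS_DOIT_UNLOCKED tells the cls_api core that these ops may be
 * called without holding rtnl; fl_change() takes rtnl only when needed (see
 * the TCA_ACT_FLAGS_NO_RTNL handling above) and otherwise relies on
 * tp->lock and RCU for mutual exclusion.
 */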
3606 static int __init cls_fl_init(void)
3608 return register_tcf_proto_ops(&cls_fl_ops);
3611 static void __exit cls_fl_exit(void)
3613 unregister_tcf_proto_ops(&cls_fl_ops);
3616 module_init(cls_fl_init);
3617 module_exit(cls_fl_exit);
3619 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3620 MODULE_DESCRIPTION("Flower classifier");
3621 MODULE_LICENSE("GPL v2");