// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
18 #include <linux/mpls.h>
19 #include <linux/ppp_defs.h>
21 #include <net/sch_generic.h>
22 #include <net/pkt_cls.h>
23 #include <net/pkt_sched.h>
25 #include <net/flow_dissector.h>
26 #include <net/geneve.h>
27 #include <net/vxlan.h>
28 #include <net/erspan.h>
30 #include <net/tc_wrapper.h>
33 #include <net/dst_metadata.h>
35 #include <uapi/linux/netfilter/nf_conntrack_common.h>
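
/* TCA_FLOWER_KEY_CT_FLAGS_MAX is the bit just above the highest conntrack
 * state flag defined in the uAPI, so TCA_FLOWER_KEY_CT_FLAGS_MASK covers
 * every valid flag bit.
 */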
37 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
38 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
39 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
43 struct flow_dissector_key_meta meta;
44 struct flow_dissector_key_control control;
45 struct flow_dissector_key_control enc_control;
46 struct flow_dissector_key_basic basic;
47 struct flow_dissector_key_eth_addrs eth;
48 struct flow_dissector_key_vlan vlan;
49 struct flow_dissector_key_vlan cvlan;
51 struct flow_dissector_key_ipv4_addrs ipv4;
52 struct flow_dissector_key_ipv6_addrs ipv6;
54 struct flow_dissector_key_ports tp;
55 struct flow_dissector_key_icmp icmp;
56 struct flow_dissector_key_arp arp;
57 struct flow_dissector_key_keyid enc_key_id;
59 struct flow_dissector_key_ipv4_addrs enc_ipv4;
60 struct flow_dissector_key_ipv6_addrs enc_ipv6;
62 struct flow_dissector_key_ports enc_tp;
63 struct flow_dissector_key_mpls mpls;
64 struct flow_dissector_key_tcp tcp;
65 struct flow_dissector_key_ip ip;
66 struct flow_dissector_key_ip enc_ip;
67 struct flow_dissector_key_enc_opts enc_opts;
68 struct flow_dissector_key_ports_range tp_range;
69 struct flow_dissector_key_ct ct;
70 struct flow_dissector_key_hash hash;
71 struct flow_dissector_key_num_of_vlans num_of_vlans;
72 struct flow_dissector_key_pppoe pppoe;
73 struct flow_dissector_key_l2tpv3 l2tpv3;
74 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
76 struct fl_flow_mask_range {
77 unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
82 struct fl_flow_key key;
83 struct fl_flow_mask_range range;
85 struct rhash_head ht_node;
87 struct rhashtable_params filter_ht_params;
88 struct flow_dissector dissector;
89 struct list_head filters;
90 struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};
95 struct fl_flow_tmplt {
96 struct fl_flow_key dummy_key;
97 struct fl_flow_key mask;
98 struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
103 struct rhashtable ht;
104 spinlock_t masks_lock; /* Protect masks list */
105 struct list_head masks;
106 struct list_head hw_filters;
107 struct rcu_work rwork;
	struct idr handle_idr;
};
111 struct cls_fl_filter {
112 struct fl_flow_mask *mask;
113 struct rhash_head ht_node;
114 struct fl_flow_key mkey;
115 struct tcf_exts exts;
116 struct tcf_result res;
117 struct fl_flow_key key;
118 struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
123 struct rcu_work rwork;
124 struct net_device *hw_dev;
125 /* Flower classifier is unlocked, which means that its reference counter
126 * can be changed concurrently without any kind of external
	 * synchronization. Use an atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};
133 static const struct rhashtable_params mask_ht_params = {
134 .key_offset = offsetof(struct fl_flow_mask, key),
135 .key_len = sizeof(struct fl_flow_key),
136 .head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};
140 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
142 return mask->range.end - mask->range.start;
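
/* fl_mask_update_range() computes the smallest long-aligned byte window that
 * covers every nonzero byte of the mask. For example, nonzero mask bytes at
 * offsets 13..18 with sizeof(long) == 8 yield the range [8, 24); lookups then
 * only compare that window, one long at a time.
 */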
145 static void fl_mask_update_range(struct fl_flow_mask *mask)
147 const u8 *bytes = (const u8 *) &mask->key;
148 size_t size = sizeof(mask->key);
149 size_t i, first = 0, last;
	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
164 mask->range.start = rounddown(first, sizeof(long));
165 mask->range.end = roundup(last + 1, sizeof(long));
168 static void *fl_key_get_start(struct fl_flow_key *key,
169 const struct fl_flow_mask *mask)
171 return (u8 *) key + mask->range.start;
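
/* mkey = key & mask, computed one long at a time over the mask's active
 * byte range only.
 */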
174 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
175 struct fl_flow_mask *mask)
177 const long *lkey = fl_key_get_start(key, mask);
178 const long *lmask = fl_key_get_start(&mask->key, mask);
179 long *lmkey = fl_key_get_start(mkey, mask);
182 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
183 *lmkey++ = *lkey++ & *lmask++;
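
/* A mask fits a template if it does not set any bit that the template's mask
 * leaves clear, i.e. ~tmplt & mask is zero across the whole range.
 */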
186 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
187 struct fl_flow_mask *mask)
189 const long *lmask = fl_key_get_start(&mask->key, mask);
195 ltmplt = fl_key_get_start(&tmplt->mask, mask);
196 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
197 if (~*ltmplt++ & *lmask++)
203 static void fl_clear_masked_range(struct fl_flow_key *key,
204 struct fl_flow_mask *mask)
206 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
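
/* Port-range filters cannot be resolved by the hash lookup alone: compare the
 * packet's port against the filter's [min, max] and, on a hit, copy the
 * filter's masked min/max into the lookup key so that __fl_lookup() can then
 * find the filter.
 */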
209 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
210 struct fl_flow_key *key,
211 struct fl_flow_key *mkey)
213 u16 min_mask, max_mask, min_val, max_val;
215 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
216 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
217 min_val = ntohs(filter->key.tp_range.tp_min.dst);
218 max_val = ntohs(filter->key.tp_range.tp_max.dst);
220 if (min_mask && max_mask) {
221 if (ntohs(key->tp_range.tp.dst) < min_val ||
222 ntohs(key->tp_range.tp.dst) > max_val)
225 /* skb does not have min and max values */
226 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
227 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
232 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
233 struct fl_flow_key *key,
234 struct fl_flow_key *mkey)
236 u16 min_mask, max_mask, min_val, max_val;
238 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
239 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
240 min_val = ntohs(filter->key.tp_range.tp_min.src);
241 max_val = ntohs(filter->key.tp_range.tp_max.src);
243 if (min_mask && max_mask) {
244 if (ntohs(key->tp_range.tp.src) < min_val ||
245 ntohs(key->tp_range.tp.src) > max_val)
248 /* skb does not have min and max values */
249 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
250 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
255 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
256 struct fl_flow_key *mkey)
258 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
259 mask->filter_ht_params);
262 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
263 struct fl_flow_key *mkey,
264 struct fl_flow_key *key)
266 struct cls_fl_filter *filter, *f;
268 list_for_each_entry_rcu(filter, &mask->filters, list) {
269 if (!fl_range_port_dst_cmp(filter, key, mkey))
272 if (!fl_range_port_src_cmp(filter, key, mkey))
275 f = __fl_lookup(mask, mkey);
282 static noinline_for_stack
283 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
285 struct fl_flow_key mkey;
287 fl_set_masked_key(&mkey, key, mask);
288 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
289 return fl_lookup_range(mask, &mkey, key);
291 return __fl_lookup(mask, &mkey);
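
/* Map the kernel's conntrack states (enum ip_conntrack_info) onto the
 * TCA_FLOWER_KEY_CT_FLAGS_* bits that flower exposes to user space.
 */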
294 static u16 fl_ct_info_to_flower_map[] = {
295 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
296 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
297 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
298 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
299 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
300 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
301 TCA_FLOWER_KEY_CT_FLAGS_REPLY,
302 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
303 TCA_FLOWER_KEY_CT_FLAGS_RELATED |
304 TCA_FLOWER_KEY_CT_FLAGS_REPLY,
305 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
			      TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
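
/* Main software classification path: for each mask, dissect only the fields
 * that mask uses, build the masked key and look it up in the mask's hash
 * table. The first match that is not skip_sw ends the walk.
 */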
309 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
310 const struct tcf_proto *tp,
311 struct tcf_result *res)
313 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
314 bool post_ct = tc_skb_cb(skb)->post_ct;
315 u16 zone = tc_skb_cb(skb)->zone;
316 struct fl_flow_key skb_key;
317 struct fl_flow_mask *mask;
318 struct cls_fl_filter *f;
320 list_for_each_entry_rcu(mask, &head->masks, list) {
321 flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
322 fl_clear_masked_range(&skb_key, mask);
324 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here.
		 */
328 skb_key.basic.n_proto = skb_protocol(skb, false);
329 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
330 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
331 fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
334 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
335 skb_flow_dissect(skb, &mask->dissector, &skb_key,
336 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
338 f = fl_mask_lookup(mask, &skb_key);
339 if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
347 static int fl_init(struct tcf_proto *tp)
349 struct cls_fl_head *head;
351 head = kzalloc(sizeof(*head), GFP_KERNEL);
355 spin_lock_init(&head->masks_lock);
356 INIT_LIST_HEAD_RCU(&head->masks);
357 INIT_LIST_HEAD(&head->hw_filters);
358 rcu_assign_pointer(tp->root, head);
359 idr_init(&head->handle_idr);
361 return rhashtable_init(&head->ht, &mask_ht_params);
364 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
366 /* temporary masks don't have their filters list and ht initialized */
367 if (mask_init_done) {
368 WARN_ON(!list_empty(&mask->filters));
369 rhashtable_destroy(&mask->ht);
374 static void fl_mask_free_work(struct work_struct *work)
376 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
377 struct fl_flow_mask, rwork);
379 fl_mask_free(mask, true);
382 static void fl_uninit_mask_free_work(struct work_struct *work)
384 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
385 struct fl_flow_mask, rwork);
387 fl_mask_free(mask, false);
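
/* Drop one reference on a mask. The final put unlinks the mask from the
 * head's hash table and masks list and frees it after an RCU grace period;
 * returns true when the mask was actually released.
 */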
390 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;
395 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
397 spin_lock(&head->masks_lock);
398 list_del_rcu(&mask->list);
399 spin_unlock(&head->masks_lock);
	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}
406 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
408 /* Flower classifier only changes root pointer during init and destroy.
409 * Users must obtain reference to tcf_proto instance before calling its
410 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
413 return rcu_dereference_raw(tp->root);
416 static void __fl_destroy_filter(struct cls_fl_filter *f)
418 tcf_exts_destroy(&f->exts);
419 tcf_exts_put_net(&f->exts);
423 static void fl_destroy_filter_work(struct work_struct *work)
425 struct cls_fl_filter *f = container_of(to_rcu_work(work),
426 struct cls_fl_filter, rwork);
428 __fl_destroy_filter(f);
431 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
432 bool rtnl_held, struct netlink_ext_ack *extack)
434 struct tcf_block *block = tp->chain->block;
435 struct flow_cls_offload cls_flower = {};
437 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
438 cls_flower.command = FLOW_CLS_DESTROY;
439 cls_flower.cookie = (unsigned long) f;
441 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
442 &f->flags, &f->in_hw_count, rtnl_held);
446 static int fl_hw_replace_filter(struct tcf_proto *tp,
447 struct cls_fl_filter *f, bool rtnl_held,
448 struct netlink_ext_ack *extack)
450 struct tcf_block *block = tp->chain->block;
451 struct flow_cls_offload cls_flower = {};
452 bool skip_sw = tc_skip_sw(f->flags);
455 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
456 if (!cls_flower.rule)
459 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
460 cls_flower.command = FLOW_CLS_REPLACE;
461 cls_flower.cookie = (unsigned long) f;
462 cls_flower.rule->match.dissector = &f->mask->dissector;
463 cls_flower.rule->match.mask = &f->mask->key;
464 cls_flower.rule->match.key = &f->mkey;
465 cls_flower.classid = f->res.classid;
467 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
468 cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);
472 return skip_sw ? err : 0;
475 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
476 skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
477 tc_cleanup_offload_action(&cls_flower.rule->action);
478 kfree(cls_flower.rule);
481 fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
485 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
491 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
494 struct tcf_block *block = tp->chain->block;
495 struct flow_cls_offload cls_flower = {};
497 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
498 cls_flower.command = FLOW_CLS_STATS;
499 cls_flower.cookie = (unsigned long) f;
500 cls_flower.classid = f->res.classid;
502 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
505 tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
508 static void __fl_put(struct cls_fl_filter *f)
510 if (!refcount_dec_and_test(&f->refcnt))
513 if (tcf_exts_get_net(&f->exts))
514 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
516 __fl_destroy_filter(f);
519 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
521 struct cls_fl_filter *f;
524 f = idr_find(&head->handle_idr, handle);
525 if (f && !refcount_inc_not_zero(&f->refcnt))
532 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
534 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
535 struct cls_fl_filter *f;
537 f = idr_find(&head->handle_idr, handle);
538 return f ? &f->exts : NULL;
541 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
542 bool *last, bool rtnl_held,
543 struct netlink_ext_ack *extack)
545 struct cls_fl_head *head = fl_head_dereference(tp);
549 spin_lock(&tp->lock);
551 spin_unlock(&tp->lock);
556 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
557 f->mask->filter_ht_params);
558 idr_remove(&head->handle_idr, f->handle);
559 list_del_rcu(&f->list);
560 spin_unlock(&tp->lock);
562 *last = fl_mask_put(head, f->mask);
563 if (!tc_skip_hw(f->flags))
564 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
565 tcf_unbind_filter(tp, &f->res);
571 static void fl_destroy_sleepable(struct work_struct *work)
573 struct cls_fl_head *head = container_of(to_rcu_work(work),
577 rhashtable_destroy(&head->ht);
579 module_put(THIS_MODULE);
582 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
583 struct netlink_ext_ack *extack)
585 struct cls_fl_head *head = fl_head_dereference(tp);
586 struct fl_flow_mask *mask, *next_mask;
587 struct cls_fl_filter *f, *next;
590 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
591 list_for_each_entry_safe(f, next, &mask->filters, list) {
592 __fl_delete(tp, f, &last, rtnl_held, extack);
597 idr_destroy(&head->handle_idr);
599 __module_get(THIS_MODULE);
600 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
603 static void fl_put(struct tcf_proto *tp, void *arg)
605 struct cls_fl_filter *f = arg;
610 static void *fl_get(struct tcf_proto *tp, u32 handle)
612 struct cls_fl_head *head = fl_head_dereference(tp);
614 return __fl_get(head, handle);
617 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
618 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
619 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
622 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
623 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
624 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
625 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
626 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
627 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
628 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
629 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
630 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
631 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
632 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
633 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
634 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
635 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
636 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
637 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
638 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
639 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
640 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
641 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
642 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
643 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
644 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
645 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
646 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
647 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
648 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
649 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
650 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
651 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
652 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
653 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
654 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
655 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
656 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
657 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
658 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
660 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
661 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
662 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
663 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
664 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
665 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
666 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
667 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
668 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
669 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
670 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
671 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
672 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
673 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
674 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
675 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
676 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
677 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
678 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
679 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
680 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
681 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
682 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
683 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
684 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
685 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
686 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
687 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
688 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
689 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
690 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
691 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
693 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
694 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
695 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
696 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
697 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
698 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
699 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
700 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
701 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
702 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
703 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
704 [TCA_FLOWER_KEY_CT_STATE] =
705 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
706 [TCA_FLOWER_KEY_CT_STATE_MASK] =
707 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
708 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
709 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
710 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
711 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
712 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
713 .len = 128 / BITS_PER_BYTE },
714 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
715 .len = 128 / BITS_PER_BYTE },
716 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
717 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
718 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
719 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
720 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
721 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
722 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
726 static const struct nla_policy
727 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
728 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
729 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
730 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
731 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
732 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
733 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
736 static const struct nla_policy
737 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
738 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
739 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
740 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
744 static const struct nla_policy
745 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
746 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
749 static const struct nla_policy
750 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
751 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
752 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
753 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
754 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
757 static const struct nla_policy
758 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
759 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 },
760 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
763 static const struct nla_policy
764 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
765 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
766 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
767 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
768 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]	= { .type = NLA_U32 },
};
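
/* Copy a value and its mask from netlink attributes. If no mask attribute
 * was supplied, assume an exact match (all-ones mask).
 */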
772 static void fl_set_key_val(struct nlattr **tb,
773 void *val, int val_type,
774 void *mask, int mask_type, int len)
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
779 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
780 memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
785 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
786 struct fl_flow_key *mask,
787 struct netlink_ext_ack *extack)
789 fl_set_key_val(tb, &key->tp_range.tp_min.dst,
790 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
791 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
792 fl_set_key_val(tb, &key->tp_range.tp_max.dst,
793 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
794 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
795 fl_set_key_val(tb, &key->tp_range.tp_min.src,
796 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
797 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
798 fl_set_key_val(tb, &key->tp_range.tp_max.src,
799 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
800 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
802 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
803 ntohs(key->tp_range.tp_max.dst) <=
804 ntohs(key->tp_range.tp_min.dst)) {
805 NL_SET_ERR_MSG_ATTR(extack,
806 tb[TCA_FLOWER_KEY_PORT_DST_MIN],
807 "Invalid destination port range (min must be strictly smaller than max)");
810 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
811 ntohs(key->tp_range.tp_max.src) <=
812 ntohs(key->tp_range.tp_min.src)) {
813 NL_SET_ERR_MSG_ATTR(extack,
814 tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
815 "Invalid source port range (min must be strictly smaller than max)");
822 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
823 struct flow_dissector_key_mpls *key_val,
824 struct flow_dissector_key_mpls *key_mask,
825 struct netlink_ext_ack *extack)
827 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
828 struct flow_dissector_mpls_lse *lse_mask;
829 struct flow_dissector_mpls_lse *lse_val;
834 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
835 mpls_stack_entry_policy, extack);
839 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
840 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
844 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
846 /* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
849 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
850 NL_SET_ERR_MSG_ATTR(extack,
851 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
852 "Invalid MPLS depth");
855 lse_index = depth - 1;
857 dissector_set_mpls_lse(key_val, lse_index);
858 dissector_set_mpls_lse(key_mask, lse_index);
860 lse_val = &key_val->ls[lse_index];
861 lse_mask = &key_mask->ls[lse_index];
863 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
864 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
865 lse_mask->mpls_ttl = MPLS_TTL_MASK;
867 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
868 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
870 if (bos & ~MPLS_BOS_MASK) {
871 NL_SET_ERR_MSG_ATTR(extack,
872 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
873 "Bottom Of Stack (BOS) must be 0 or 1");
876 lse_val->mpls_bos = bos;
877 lse_mask->mpls_bos = MPLS_BOS_MASK;
879 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
880 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
882 if (tc & ~MPLS_TC_MASK) {
883 NL_SET_ERR_MSG_ATTR(extack,
884 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
885 "Traffic Class (TC) must be between 0 and 7");
888 lse_val->mpls_tc = tc;
889 lse_mask->mpls_tc = MPLS_TC_MASK;
891 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
892 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
894 if (label & ~MPLS_LABEL_MASK) {
895 NL_SET_ERR_MSG_ATTR(extack,
896 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
897 "Label must be between 0 and 1048575");
900 lse_val->mpls_label = label;
901 lse_mask->mpls_label = MPLS_LABEL_MASK;
907 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
908 struct flow_dissector_key_mpls *key_val,
909 struct flow_dissector_key_mpls *key_mask,
910 struct netlink_ext_ack *extack)
912 struct nlattr *nla_lse;
916 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
917 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
918 "NLA_F_NESTED is missing");
922 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
923 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
924 NL_SET_ERR_MSG_ATTR(extack, nla_lse,
925 "Invalid MPLS option type");
929 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
934 NL_SET_ERR_MSG(extack,
935 "Bytes leftover after parsing MPLS options");
942 static int fl_set_key_mpls(struct nlattr **tb,
943 struct flow_dissector_key_mpls *key_val,
944 struct flow_dissector_key_mpls *key_mask,
945 struct netlink_ext_ack *extack)
947 struct flow_dissector_mpls_lse *lse_mask;
948 struct flow_dissector_mpls_lse *lse_val;
950 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
951 if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
952 tb[TCA_FLOWER_KEY_MPLS_BOS] ||
953 tb[TCA_FLOWER_KEY_MPLS_TC] ||
954 tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
955 NL_SET_ERR_MSG_ATTR(extack,
956 tb[TCA_FLOWER_KEY_MPLS_OPTS],
957 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
961 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
962 key_val, key_mask, extack);
965 lse_val = &key_val->ls[0];
966 lse_mask = &key_mask->ls[0];
968 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
969 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
970 lse_mask->mpls_ttl = MPLS_TTL_MASK;
971 dissector_set_mpls_lse(key_val, 0);
972 dissector_set_mpls_lse(key_mask, 0);
974 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
975 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
977 if (bos & ~MPLS_BOS_MASK) {
978 NL_SET_ERR_MSG_ATTR(extack,
979 tb[TCA_FLOWER_KEY_MPLS_BOS],
980 "Bottom Of Stack (BOS) must be 0 or 1");
983 lse_val->mpls_bos = bos;
984 lse_mask->mpls_bos = MPLS_BOS_MASK;
985 dissector_set_mpls_lse(key_val, 0);
986 dissector_set_mpls_lse(key_mask, 0);
988 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
989 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
991 if (tc & ~MPLS_TC_MASK) {
992 NL_SET_ERR_MSG_ATTR(extack,
993 tb[TCA_FLOWER_KEY_MPLS_TC],
994 "Traffic Class (TC) must be between 0 and 7");
997 lse_val->mpls_tc = tc;
998 lse_mask->mpls_tc = MPLS_TC_MASK;
999 dissector_set_mpls_lse(key_val, 0);
1000 dissector_set_mpls_lse(key_mask, 0);
1002 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1003 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
1005 if (label & ~MPLS_LABEL_MASK) {
1006 NL_SET_ERR_MSG_ATTR(extack,
1007 tb[TCA_FLOWER_KEY_MPLS_LABEL],
1008 "Label must be between 0 and 1048575");
1011 lse_val->mpls_label = label;
1012 lse_mask->mpls_label = MPLS_LABEL_MASK;
1013 dissector_set_mpls_lse(key_val, 0);
1014 dissector_set_mpls_lse(key_mask, 0);
1019 static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
1022 int vlan_next_eth_type_key,
1023 struct flow_dissector_key_vlan *key_val,
1024 struct flow_dissector_key_vlan *key_mask)
1026 #define VLAN_PRIORITY_MASK 0x7
1028 if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1031 key_mask->vlan_id = VLAN_VID_MASK;
1033 if (tb[vlan_prio_key]) {
1034 key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
1037 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1040 key_val->vlan_tpid = ethertype;
1041 key_mask->vlan_tpid = cpu_to_be16(~0);
1043 if (tb[vlan_next_eth_type_key]) {
1044 key_val->vlan_eth_type =
1045 nla_get_be16(tb[vlan_next_eth_type_key]);
1046 key_mask->vlan_eth_type = cpu_to_be16(~0);
1050 static void fl_set_key_pppoe(struct nlattr **tb,
1051 struct flow_dissector_key_pppoe *key_val,
1052 struct flow_dissector_key_pppoe *key_mask,
1053 struct fl_flow_key *key,
1054 struct fl_flow_key *mask)
1056 /* key_val::type must be set to ETH_P_PPP_SES
1057 * because ETH_P_PPP_SES was stored in basic.n_proto
1058 * which might get overwritten by ppp_proto
1059 * or might be set to 0, the role of key_val::type
	 * is similar to vlan_key::tpid.
	 */
1062 key_val->type = htons(ETH_P_PPP_SES);
1063 key_mask->type = cpu_to_be16(~0);
1065 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
1066 key_val->session_id =
1067 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
1068 key_mask->session_id = cpu_to_be16(~0);
1070 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
1071 key_val->ppp_proto =
1072 nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
1073 key_mask->ppp_proto = cpu_to_be16(~0);
1075 if (key_val->ppp_proto == htons(PPP_IP)) {
1076 key->basic.n_proto = htons(ETH_P_IP);
1077 mask->basic.n_proto = cpu_to_be16(~0);
1078 } else if (key_val->ppp_proto == htons(PPP_IPV6)) {
1079 key->basic.n_proto = htons(ETH_P_IPV6);
1080 mask->basic.n_proto = cpu_to_be16(~0);
1081 } else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
1082 key->basic.n_proto = htons(ETH_P_MPLS_UC);
1083 mask->basic.n_proto = cpu_to_be16(~0);
1084 } else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
1085 key->basic.n_proto = htons(ETH_P_MPLS_MC);
1086 mask->basic.n_proto = cpu_to_be16(~0);
1089 key->basic.n_proto = 0;
1090 mask->basic.n_proto = cpu_to_be16(0);
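
/* Copy a single flag bit from the flower uAPI encoding into the flow
 * dissector encoding, but only if user space actually masked it.
 */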
1094 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1095 u32 *dissector_key, u32 *dissector_mask,
1096 u32 flower_flag_bit, u32 dissector_flag_bit)
1098 if (flower_mask & flower_flag_bit) {
1099 *dissector_mask |= dissector_flag_bit;
1100 if (flower_key & flower_flag_bit)
1101 *dissector_key |= dissector_flag_bit;
1105 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1106 u32 *flags_mask, struct netlink_ext_ack *extack)
1110 /* mask is mandatory for flags */
1111 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1112 NL_SET_ERR_MSG(extack, "Missing flags mask");
1116 key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1117 mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1122 fl_set_key_flag(key, mask, flags_key, flags_mask,
1123 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1124 fl_set_key_flag(key, mask, flags_key, flags_mask,
1125 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1126 FLOW_DIS_FIRST_FRAG);
1131 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1132 struct flow_dissector_key_ip *key,
1133 struct flow_dissector_key_ip *mask)
1135 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1136 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1137 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1138 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1140 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1141 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
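
/* Parse one geneve option from either the key or the mask nest. When no mask
 * nest was supplied at all, an exact match (all-ones mask) is assumed.
 */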
1144 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1145 int depth, int option_len,
1146 struct netlink_ext_ack *extack)
1148 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1149 struct nlattr *class = NULL, *type = NULL, *data = NULL;
1150 struct geneve_opt *opt;
1151 int err, data_len = 0;
1153 if (option_len > sizeof(struct geneve_opt))
1154 data_len = option_len - sizeof(struct geneve_opt);
1156 if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1159 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1160 memset(opt, 0xff, option_len);
1161 opt->length = data_len / 4;
	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;
1170 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1171 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1175 err = nla_parse_nested_deprecated(tb,
1176 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1177 nla, geneve_opt_policy, extack);
1181 /* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
1185 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1186 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1187 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1188 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
1195 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1196 int new_len = key->enc_opts.len;
1198 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1199 data_len = nla_len(data);
1201 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1205 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1209 new_len += sizeof(struct geneve_opt) + data_len;
1210 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1211 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1212 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1215 opt->length = data_len / 4;
1216 memcpy(opt->opt_data, nla_data(data), data_len);
1219 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1220 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1221 opt->opt_class = nla_get_be16(class);
1224 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1225 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1226 opt->type = nla_get_u8(type);
1229 return sizeof(struct geneve_opt) + data_len;
1232 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1233 int depth, int option_len,
1234 struct netlink_ext_ack *extack)
1236 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1237 struct vxlan_metadata *md;
1240 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1241 memset(md, 0xff, sizeof(*md));
1246 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1247 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1251 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1252 vxlan_opt_policy, extack);
1256 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1257 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1261 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1262 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1263 md->gbp &= VXLAN_GBP_MASK;
1269 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1270 int depth, int option_len,
1271 struct netlink_ext_ack *extack)
1273 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1274 struct erspan_metadata *md;
1277 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1278 memset(md, 0xff, sizeof(*md));
1284 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1285 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1289 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1290 erspan_opt_policy, extack);
1294 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1295 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1299 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1300 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1302 if (md->version == 1) {
1303 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1304 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1307 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1308 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1309 memset(&md->u, 0x00, sizeof(md->u));
1310 md->u.index = nla_get_be32(nla);
1312 } else if (md->version == 2) {
1313 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1314 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1315 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1318 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1319 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1320 md->u.md2.dir = nla_get_u8(nla);
1322 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1323 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1324 set_hwid(&md->u.md2, nla_get_u8(nla));
1327 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1334 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1335 int depth, int option_len,
1336 struct netlink_ext_ack *extack)
1338 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1339 struct gtp_pdu_session_info *sinfo;
1340 u8 len = key->enc_opts.len;
1343 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1344 memset(sinfo, 0xff, option_len);
	if (!depth)
		return sizeof(*sinfo);
1349 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1350 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1354 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1355 gtp_opt_policy, extack);
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1361 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1362 NL_SET_ERR_MSG_MOD(extack,
1363 "Missing tunnel key gtp option pdu type or qfi");
1367 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1371 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1372 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1374 return sizeof(*sinfo);
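
/* Walk the encap-option key nest and, if present, the mask nest in lockstep:
 * each option is parsed once for the key and once for the mask, and both
 * sides must end up with the same total length.
 */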
1377 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1378 struct fl_flow_key *mask,
1379 struct netlink_ext_ack *extack)
1381 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1382 int err, option_len, key_depth, msk_depth = 0;
1384 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1385 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1386 enc_opts_policy, extack);
1390 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1392 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1393 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1394 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1395 enc_opts_policy, extack);
1399 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1400 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1401 if (!nla_ok(nla_opt_msk, msk_depth)) {
1402 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1407 nla_for_each_attr(nla_opt_key, nla_enc_key,
1408 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1409 switch (nla_type(nla_opt_key)) {
1410 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1411 if (key->enc_opts.dst_opt_type &&
1412 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1413 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1417 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1418 option_len = fl_set_geneve_opt(nla_opt_key, key,
1419 key_depth, option_len,
1424 key->enc_opts.len += option_len;
1425 /* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
1428 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1429 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1430 msk_depth, option_len,
1435 mask->enc_opts.len += option_len;
1436 if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1441 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1442 if (key->enc_opts.dst_opt_type) {
1443 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1447 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1448 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1449 key_depth, option_len,
1454 key->enc_opts.len += option_len;
1455 /* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
1458 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1459 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1460 msk_depth, option_len,
1465 mask->enc_opts.len += option_len;
1466 if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1471 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1472 if (key->enc_opts.dst_opt_type) {
1473 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1477 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1478 option_len = fl_set_erspan_opt(nla_opt_key, key,
1479 key_depth, option_len,
1484 key->enc_opts.len += option_len;
1485 /* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
1488 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1489 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1490 msk_depth, option_len,
1495 mask->enc_opts.len += option_len;
1496 if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1501 case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1502 if (key->enc_opts.dst_opt_type) {
1503 NL_SET_ERR_MSG_MOD(extack,
1504 "Duplicate type for gtp options");
1508 key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1509 option_len = fl_set_gtp_opt(nla_opt_key, key,
1510 key_depth, option_len,
1515 key->enc_opts.len += option_len;
1516 /* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
1519 mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1520 option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1521 msk_depth, option_len,
1526 mask->enc_opts.len += option_len;
1527 if (key->enc_opts.len != mask->enc_opts.len) {
1528 NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask are misaligned");
1534 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1541 if (!nla_ok(nla_opt_msk, msk_depth)) {
1542 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1545 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1551 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1552 struct netlink_ext_ack *extack)
1554 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1555 NL_SET_ERR_MSG_ATTR(extack, tb,
1556 "no trk, so no other flag can be set");
1560 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1561 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1562 NL_SET_ERR_MSG_ATTR(extack, tb,
1563 "new and est are mutually exclusive");
1567 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1568 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1569 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1570 NL_SET_ERR_MSG_ATTR(extack, tb,
1571 "when inv is set, only trk may be set");
1575 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1576 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1577 NL_SET_ERR_MSG_ATTR(extack, tb,
1578 "new and rpl are mutually exclusive");
1585 static int fl_set_key_ct(struct nlattr **tb,
1586 struct flow_dissector_key_ct *key,
1587 struct flow_dissector_key_ct *mask,
1588 struct netlink_ext_ack *extack)
1590 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1593 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1594 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1597 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1598 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1599 sizeof(key->ct_state));
1601 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
1608 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1609 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1610 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1613 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1614 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1615 sizeof(key->ct_zone));
1617 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1618 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1619 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1622 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1623 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1624 sizeof(key->ct_mark));
1626 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1627 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1628 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1631 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1632 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1633 sizeof(key->ct_labels));
static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
			struct fl_flow_key *key, struct fl_flow_key *mask,
			u8 vthresh)
{
	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;

	if (!tb) {
		*ethertype = 0;
		return good_num_of_vlans;
	}
1650 *ethertype = nla_get_be16(tb);
1651 if (good_num_of_vlans || eth_type_vlan(*ethertype))
1654 key->basic.n_proto = *ethertype;
1655 mask->basic.n_proto = cpu_to_be16(~0);
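
/* Translate the TCA_FLOWER_* netlink attributes into the internal key/mask
 * pair. Ordering matters: the ethertype/VLAN handling above the L3/L4 blocks
 * decides how basic.n_proto is interpreted further down.
 */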
1659 static int fl_set_key(struct net *net, struct nlattr **tb,
1660 struct fl_flow_key *key, struct fl_flow_key *mask,
1661 struct netlink_ext_ack *extack)
1666 if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
1670 key->meta.ingress_ifindex = err;
1671 mask->meta.ingress_ifindex = 0xffffffff;
1674 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1675 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1676 sizeof(key->eth.dst));
1677 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1678 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1679 sizeof(key->eth.src));
1680 fl_set_key_val(tb, &key->num_of_vlans,
1681 TCA_FLOWER_KEY_NUM_OF_VLANS,
		       &mask->num_of_vlans,
		       TCA_FLOWER_UNSPEC,
1684 sizeof(key->num_of_vlans));
	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1687 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1688 TCA_FLOWER_KEY_VLAN_PRIO,
1689 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1690 &key->vlan, &mask->vlan);
1692 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
				&ethertype, key, mask, 1)) {
1694 fl_set_key_vlan(tb, ethertype,
1695 TCA_FLOWER_KEY_CVLAN_ID,
1696 TCA_FLOWER_KEY_CVLAN_PRIO,
1697 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1698 &key->cvlan, &mask->cvlan);
1699 fl_set_key_val(tb, &key->basic.n_proto,
1700 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
				       &mask->basic.n_proto,
				       TCA_FLOWER_UNSPEC,
1703 sizeof(key->basic.n_proto));
1707 if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1708 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1710 if (key->basic.n_proto == htons(ETH_P_IP) ||
1711 key->basic.n_proto == htons(ETH_P_IPV6)) {
1712 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1713 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1714 sizeof(key->basic.ip_proto));
1715 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1718 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1719 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1720 mask->control.addr_type = ~0;
1721 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1722 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1723 sizeof(key->ipv4.src));
1724 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1725 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1726 sizeof(key->ipv4.dst));
1727 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1728 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1729 mask->control.addr_type = ~0;
1730 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1731 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1732 sizeof(key->ipv6.src));
1733 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1734 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1735 sizeof(key->ipv6.dst));
1738 if (key->basic.ip_proto == IPPROTO_TCP) {
1739 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1740 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1741 sizeof(key->tp.src));
1742 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1743 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1744 sizeof(key->tp.dst));
1745 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1746 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1747 sizeof(key->tcp.flags));
1748 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1749 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1750 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1751 sizeof(key->tp.src));
1752 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1753 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1754 sizeof(key->tp.dst));
1755 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1756 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1757 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1758 sizeof(key->tp.src));
1759 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1760 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1761 sizeof(key->tp.dst));
1762 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1763 key->basic.ip_proto == IPPROTO_ICMP) {
1764 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1767 sizeof(key->icmp.type));
1768 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1771 sizeof(key->icmp.code));
1772 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1773 key->basic.ip_proto == IPPROTO_ICMPV6) {
1774 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1777 sizeof(key->icmp.type));
1778 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1781 sizeof(key->icmp.code));
1782 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1783 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1784 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1787 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1788 key->basic.n_proto == htons(ETH_P_RARP)) {
1789 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1790 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1791 sizeof(key->arp.sip));
1792 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1793 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1794 sizeof(key->arp.tip));
1795 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1796 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1797 sizeof(key->arp.op));
1798 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1799 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1800 sizeof(key->arp.sha));
1801 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1802 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1803 sizeof(key->arp.tha));
1804 } else if (key->basic.ip_proto == IPPROTO_L2TP) {
1805 fl_set_key_val(tb, &key->l2tpv3.session_id,
1806 TCA_FLOWER_KEY_L2TPV3_SID,
1807 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1808 sizeof(key->l2tpv3.session_id));
1811 if (key->basic.ip_proto == IPPROTO_TCP ||
1812 key->basic.ip_proto == IPPROTO_UDP ||
1813 key->basic.ip_proto == IPPROTO_SCTP) {
1814 ret = fl_set_key_port_range(tb, key, mask, extack);
1819 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1820 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1821 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1822 mask->enc_control.addr_type = ~0;
1823 fl_set_key_val(tb, &key->enc_ipv4.src,
1824 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1825 &mask->enc_ipv4.src,
1826 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1827 sizeof(key->enc_ipv4.src));
1828 fl_set_key_val(tb, &key->enc_ipv4.dst,
1829 TCA_FLOWER_KEY_ENC_IPV4_DST,
1830 &mask->enc_ipv4.dst,
1831 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1832 sizeof(key->enc_ipv4.dst));
1835 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1836 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1837 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1838 mask->enc_control.addr_type = ~0;
1839 fl_set_key_val(tb, &key->enc_ipv6.src,
1840 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1841 &mask->enc_ipv6.src,
1842 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1843 sizeof(key->enc_ipv6.src));
1844 fl_set_key_val(tb, &key->enc_ipv6.dst,
1845 TCA_FLOWER_KEY_ENC_IPV6_DST,
1846 &mask->enc_ipv6.dst,
1847 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1848 sizeof(key->enc_ipv6.dst));
1851 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1852 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1853 sizeof(key->enc_key_id.keyid));
1855 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1856 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1857 sizeof(key->enc_tp.src));
1859 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1860 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1861 sizeof(key->enc_tp.dst));
1863 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1865 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1866 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1867 sizeof(key->hash.hash));
1869 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1870 ret = fl_set_enc_opt(tb, key, mask, extack);
1875 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1879 if (tb[TCA_FLOWER_KEY_FLAGS])
1880 ret = fl_set_key_flags(tb, &key->control.flags,
1881 &mask->control.flags, extack);
1886 static void fl_mask_copy(struct fl_flow_mask *dst,
1887 struct fl_flow_mask *src)
1889 const void *psrc = fl_key_get_start(&src->key, src);
1890 void *pdst = fl_key_get_start(&dst->key, src);
1892 memcpy(pdst, psrc, fl_mask_range(src));
1893 dst->range = src->range;
1896 static const struct rhashtable_params fl_ht_params = {
1897 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1898 .head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
1902 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1904 mask->filter_ht_params = fl_ht_params;
1905 mask->filter_ht_params.key_len = fl_mask_range(mask);
1906 mask->filter_ht_params.key_offset += mask->range.start;
1908 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1911 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1912 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0)

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0)
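
/* Build the per-mask flow dissector: CONTROL and BASIC are always dissected,
 * every other key is included only if the mask actually uses it.
 */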
1931 static void fl_init_dissector(struct flow_dissector *dissector,
1932 struct fl_flow_key *mask)
1934 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1937 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1938 FLOW_DISSECTOR_KEY_META, meta);
1939 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1940 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1941 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1942 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1943 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1944 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1945 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1946 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1947 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1948 FLOW_DISSECTOR_KEY_PORTS, tp);
1949 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1950 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1951 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1952 FLOW_DISSECTOR_KEY_IP, ip);
1953 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1954 FLOW_DISSECTOR_KEY_TCP, tcp);
1955 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1956 FLOW_DISSECTOR_KEY_ICMP, icmp);
1957 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1958 FLOW_DISSECTOR_KEY_ARP, arp);
1959 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1960 FLOW_DISSECTOR_KEY_MPLS, mpls);
1961 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1962 FLOW_DISSECTOR_KEY_VLAN, vlan);
1963 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1964 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1965 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1966 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1967 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1968 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1969 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1970 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1971 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1972 FL_KEY_IS_MASKED(mask, enc_ipv6))
1973 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
1975 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1976 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1977 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1978 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1979 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1980 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1981 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1982 FLOW_DISSECTOR_KEY_CT, ct);
1983 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1984 FLOW_DISSECTOR_KEY_HASH, hash);
1985 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1986 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1987 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1988 FLOW_DISSECTOR_KEY_PPPOE, pppoe);
1989 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1990 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
1992 skb_flow_dissector_init(dissector, keys, cnt);
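/* Allocate a refcounted copy of @mask, initialize its filter hashtable and
 * dissector, replace the temporary placeholder node in head->ht with it and
 * link it into head->masks.
 */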
1995 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1996 struct fl_flow_mask *mask)
1998 struct fl_flow_mask *newmask;
2001 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2003 if (!newmask) return ERR_PTR(-ENOMEM);
2005 fl_mask_copy(newmask, mask);
2007 if ((newmask->key.tp_range.tp_min.dst &&
2008 newmask->key.tp_range.tp_max.dst) ||
2009 (newmask->key.tp_range.tp_min.src &&
2010 newmask->key.tp_range.tp_max.src))
2011 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2013 err = fl_init_mask_hashtable(newmask);
2017 fl_init_dissector(&newmask->dissector, &newmask->key);
2019 INIT_LIST_HEAD_RCU(&newmask->filters);
2021 refcount_set(&newmask->refcnt, 1);
2022 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2023 &newmask->ht_node, mask_ht_params);
2025 goto errout_destroy;
2027 spin_lock(&head->masks_lock);
2028 list_add_tail_rcu(&newmask->list, &head->masks);
2029 spin_unlock(&head->masks_lock);
2034 rhashtable_destroy(&newmask->ht);
2038 return ERR_PTR(err);
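/* Find an existing mask identical to @mask and take a reference on it, or
 * create a new one. Masks are shared by all filters that match on the same
 * set of fields.
 */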
2041 static int fl_check_assign_mask(struct cls_fl_head *head,
2042 struct cls_fl_filter *fnew,
2043 struct cls_fl_filter *fold,
2044 struct fl_flow_mask *mask)
2046 struct fl_flow_mask *newmask;
2051 /* Insert mask as temporary node to prevent concurrent creation of mask
2052 * with same key. Any concurrent lookups with same key will return
2053 * -EAGAIN because mask's refcnt is zero. */
2055 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, &mask->ht_node, mask_ht_params);
2063 goto errout_cleanup;
2066 newmask = fl_create_new_mask(head, mask);
2067 if (IS_ERR(newmask)) {
2068 ret = PTR_ERR(newmask);
2069 goto errout_cleanup;
2072 fnew->mask = newmask;
2074 } else if (IS_ERR(fnew->mask)) {
2075 ret = PTR_ERR(fnew->mask);
2076 } else if (fold && fold->mask != fnew->mask) {
2078 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2079 /* Mask was deleted concurrently, try again */
2086 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
2091 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2092 struct cls_fl_filter *f, struct fl_flow_mask *mask,
2093 unsigned long base, struct nlattr **tb,
2095 struct fl_flow_tmplt *tmplt,
2096 u32 flags, u32 fl_flags,
2097 struct netlink_ext_ack *extack)
2101 err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags, fl_flags, extack);
2106 if (tb[TCA_FLOWER_CLASSID]) {
2107 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2108 if (flags & TCA_ACT_FLAGS_NO_RTNL) rtnl_lock();
2110 tcf_bind_filter(tp, &f->res, base);
2111 if (flags & TCA_ACT_FLAGS_NO_RTNL) rtnl_unlock();
2115 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2119 fl_mask_update_range(mask);
2120 fl_set_masked_key(&f->mkey, &f->key, mask);
2122 if (!fl_mask_fits_tmplt(tmplt, mask)) {
2123 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
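/* Insert @fnew into its mask's hashtable. A duplicate key is tolerated only
 * when this change overwrites an existing filter (@fold is set).
 */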
2130 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2131 struct cls_fl_filter *fold,
2134 struct fl_flow_mask *mask = fnew->mask;
2137 err = rhashtable_lookup_insert_fast(&mask->ht, &fnew->ht_node,
2139 mask->filter_ht_params);
2142 /* It is okay if a filter with the same key exists when overwriting an existing filter. */
2145 return fold && err == -EEXIST ? 0 : err;
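/* Add or update a filter. The classifier is unlocked, so handle allocation,
 * mask assignment and the final idr/list updates are done in short critical
 * sections under tp->lock, with refcounts guarding against concurrent
 * deletion.
 */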
2152 static int fl_change(struct net *net, struct sk_buff *in_skb,
2153 struct tcf_proto *tp, unsigned long base,
2154 u32 handle, struct nlattr **tca,
2155 void **arg, u32 flags,
2156 struct netlink_ext_ack *extack)
2158 struct cls_fl_head *head = fl_head_dereference(tp);
2159 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2160 struct cls_fl_filter *fold = *arg;
2161 struct cls_fl_filter *fnew;
2162 struct fl_flow_mask *mask;
2167 if (!tca[TCA_OPTIONS]) {
2172 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2178 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2181 goto errout_mask_alloc;
2184 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2185 tca[TCA_OPTIONS], fl_policy, NULL);
2189 if (fold && handle && fold->handle != handle) {
2194 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2199 INIT_LIST_HEAD(&fnew->hw_list);
2200 refcount_set(&fnew->refcnt, 1);
2202 if (tb[TCA_FLOWER_FLAGS]) {
2203 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2205 if (!tc_flags_valid(fnew->flags)) {
2213 spin_lock(&tp->lock);
2216 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2217 INT_MAX, GFP_ATOMIC);
2219 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2220 handle, GFP_ATOMIC);
2222 /* Filter with specified handle was concurrently
2223 * inserted after initial check in cls_api. This is not
2224 * necessarily an error if NLM_F_EXCL is not set in
2225 * message flags. Returning EAGAIN will cause cls_api to
2226 * try to update concurrently inserted rule. */
2231 spin_unlock(&tp->lock);
2238 fnew->handle = handle;
2240 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2241 !tc_skip_hw(fnew->flags));
2245 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2246 tp->chain->tmplt_priv, flags, fnew->flags,
2251 err = fl_check_assign_mask(head, fnew, fold, mask);
2255 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2259 if (!tc_skip_hw(fnew->flags)) {
2260 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2265 if (!tc_in_hw(fnew->flags))
2266 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2268 spin_lock(&tp->lock);
2270 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2271 * proto again or create new one, if necessary. */
2279 /* Fold filter was deleted concurrently. Retry lookup. */
2280 if (fold->deleted) {
2285 fnew->handle = handle;
2288 struct rhashtable_params params =
2289 fnew->mask->filter_ht_params;
2291 err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, params);
2299 refcount_inc(&fnew->refcnt);
2300 rhashtable_remove_fast(&fold->mask->ht, &fold->ht_node,
2302 fold->mask->filter_ht_params);
2303 idr_replace(&head->handle_idr, fnew, fnew->handle);
2304 list_replace_rcu(&fold->list, &fnew->list);
2305 fold->deleted = true;
2307 spin_unlock(&tp->lock);
2309 fl_mask_put(head, fold->mask);
2310 if (!tc_skip_hw(fold->flags))
2311 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2312 tcf_unbind_filter(tp, &fold->res);
2313 /* Caller holds reference to fold, so refcnt is always > 0 after this point. */
2316 refcount_dec(&fold->refcnt);
2319 idr_replace(&head->handle_idr, fnew, fnew->handle);
2321 refcount_inc(&fnew->refcnt);
2322 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2323 spin_unlock(&tp->lock);
2329 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2333 spin_lock(&tp->lock);
2335 fnew->deleted = true;
2336 spin_unlock(&tp->lock);
2337 if (!tc_skip_hw(fnew->flags))
2338 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2340 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2341 fnew->mask->filter_ht_params);
2343 fl_mask_put(head, fnew->mask);
2346 idr_remove(&head->handle_idr, fnew->handle);
2351 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2358 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2359 bool rtnl_held, struct netlink_ext_ack *extack)
2361 struct cls_fl_head *head = fl_head_dereference(tp);
2362 struct cls_fl_filter *f = arg;
2366 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2367 *last = list_empty(&head->masks);
2373 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2376 struct cls_fl_head *head = fl_head_dereference(tp);
2377 unsigned long id = arg->cookie, tmp;
2378 struct cls_fl_filter *f;
2380 arg->count = arg->skip;
2383 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2384 /* don't return filters that are being deleted */
2385 if (!f || !refcount_inc_not_zero(&f->refcnt)) continue;
2389 if (arg->fn(tp, f, arg) < 0) {
2403 static struct cls_fl_filter *
2404 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2406 struct cls_fl_head *head = fl_head_dereference(tp);
2408 spin_lock(&tp->lock);
2409 if (list_empty(&head->hw_filters)) {
2410 spin_unlock(&tp->lock);
2415 f = list_entry(&head->hw_filters, struct cls_fl_filter, hw_list);
2417 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2418 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2419 spin_unlock(&tp->lock);
2424 spin_unlock(&tp->lock);
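/* Replay all filters on the hw_filters list to the given offload callback,
 * either adding or removing them, e.g. when an offload-capable callback is
 * bound to or unbound from the block.
 */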
2428 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2429 void *cb_priv, struct netlink_ext_ack *extack)
2431 struct tcf_block *block = tp->chain->block;
2432 struct flow_cls_offload cls_flower = {};
2433 struct cls_fl_filter *f = NULL;
2436 /* hw_filters list can only be changed by hw offload functions after
2437 * obtaining rtnl lock. Make sure it is not changed while reoffload is iterating it. */
2442 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2444 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2445 if (!cls_flower.rule) {
2450 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
2452 cls_flower.command = add ?
2453 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2454 cls_flower.cookie = (unsigned long)f;
2455 cls_flower.rule->match.dissector = &f->mask->dissector;
2456 cls_flower.rule->match.mask = &f->mask->key;
2457 cls_flower.rule->match.key = &f->mkey;
2459 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2460 cls_flower.common.extack);
2462 kfree(cls_flower.rule);
2463 if (tc_skip_sw(f->flags)) {
2470 cls_flower.classid = f->res.classid;
2472 err = tc_setup_cb_reoffload(block, tp, add, cb,
2473 TC_SETUP_CLSFLOWER, &cls_flower, cb_priv, &f->flags, &f->in_hw_count);
2476 tc_cleanup_offload_action(&cls_flower.rule->action);
2477 kfree(cls_flower.rule);
2490 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2492 struct flow_cls_offload *cls_flower = type_data;
2493 struct cls_fl_filter *f =
2494 (struct cls_fl_filter *) cls_flower->cookie;
2495 struct cls_fl_head *head = fl_head_dereference(tp);
2497 spin_lock(&tp->lock);
2498 list_add(&f->hw_list, &head->hw_filters);
2499 spin_unlock(&tp->lock);
2502 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2504 struct flow_cls_offload *cls_flower = type_data;
2505 struct cls_fl_filter *f =
2506 (struct cls_fl_filter *) cls_flower->cookie;
2508 spin_lock(&tp->lock);
2509 if (!list_empty(&f->hw_list))
2510 list_del_init(&f->hw_list);
2511 spin_unlock(&tp->lock);
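/* Offer the chain template to the offload drivers. It is only a hint about
 * the masks that future filters on this chain will use.
 */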
2514 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2515 struct fl_flow_tmplt *tmplt)
2517 struct flow_cls_offload cls_flower = {};
2518 struct tcf_block *block = chain->block;
2520 cls_flower.rule = flow_rule_alloc(0);
2521 if (!cls_flower.rule) return -ENOBUFS;
2524 cls_flower.common.chain_index = chain->index;
2525 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2526 cls_flower.cookie = (unsigned long) tmplt;
2527 cls_flower.rule->match.dissector = &tmplt->dissector;
2528 cls_flower.rule->match.mask = &tmplt->mask;
2529 cls_flower.rule->match.key = &tmplt->dummy_key;
2531 /* We don't care if driver (any of them) fails to handle this
2532 * call. It serves just as a hint for it. */
2534 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2535 kfree(cls_flower.rule);
2540 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2541 struct fl_flow_tmplt *tmplt)
2543 struct flow_cls_offload cls_flower = {};
2544 struct tcf_block *block = chain->block;
2546 cls_flower.common.chain_index = chain->index;
2547 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2548 cls_flower.cookie = (unsigned long) tmplt;
2550 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2553 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2554 struct nlattr **tca,
2555 struct netlink_ext_ack *extack)
2557 struct fl_flow_tmplt *tmplt;
2561 if (!tca[TCA_OPTIONS])
2562 return ERR_PTR(-EINVAL);
2564 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2566 if (!tb) return ERR_PTR(-ENOBUFS);
2567 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2568 tca[TCA_OPTIONS], fl_policy, NULL);
2572 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2577 tmplt->chain = chain;
2578 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2582 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2584 err = fl_hw_create_tmplt(chain, tmplt);
2595 return ERR_PTR(err);
2598 static void fl_tmplt_destroy(void *tmplt_priv)
2600 struct fl_flow_tmplt *tmplt = tmplt_priv;
2602 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
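/* Dump one key/mask pair to netlink. The attribute is skipped entirely when
 * the mask is all zeroes, and the mask attribute itself is omitted for
 * TCA_FLOWER_UNSPEC mask types.
 */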
2606 static int fl_dump_key_val(struct sk_buff *skb,
2607 void *val, int val_type,
2608 void *mask, int mask_type, int len)
2612 if (!memchr_inv(mask, 0, len))
2614 err = nla_put(skb, val_type, len, val);
2617 if (mask_type != TCA_FLOWER_UNSPEC) {
2618 err = nla_put(skb, mask_type, len, mask);
2625 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2626 struct fl_flow_key *mask)
2628 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2629 TCA_FLOWER_KEY_PORT_DST_MIN,
2630 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2631 sizeof(key->tp_range.tp_min.dst)) ||
2632 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2633 TCA_FLOWER_KEY_PORT_DST_MAX,
2634 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2635 sizeof(key->tp_range.tp_max.dst)) ||
2636 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2637 TCA_FLOWER_KEY_PORT_SRC_MIN,
2638 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2639 sizeof(key->tp_range.tp_min.src)) ||
2640 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2641 TCA_FLOWER_KEY_PORT_SRC_MAX,
2642 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2643 sizeof(key->tp_range.tp_max.src)))
2649 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2650 struct flow_dissector_key_mpls *mpls_key,
2651 struct flow_dissector_key_mpls *mpls_mask,
2654 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2655 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2658 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, lse_index + 1);
2663 if (lse_mask->mpls_ttl) {
2664 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, lse_key->mpls_ttl);
2669 if (lse_mask->mpls_bos) {
2670 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, lse_key->mpls_bos);
2675 if (lse_mask->mpls_tc) {
2676 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, lse_key->mpls_tc);
2681 if (lse_mask->mpls_label) {
2682 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2683 lse_key->mpls_label);
2691 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2692 struct flow_dissector_key_mpls *mpls_key,
2693 struct flow_dissector_key_mpls *mpls_mask)
2695 struct nlattr *opts;
2700 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2704 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2705 if (!(mpls_mask->used_lses & 1 << lse_index)) continue;
2708 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2714 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, lse_index);
2718 nla_nest_end(skb, lse);
2720 nla_nest_end(skb, opts);
2725 nla_nest_cancel(skb, lse);
2727 nla_nest_cancel(skb, opts);
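/* Dump the MPLS match. The legacy single-LSE attributes are used only when
 * the match is confined to the first label stack entry and at least one of
 * TTL/BOS/TC/LABEL is masked; otherwise the nested TCA_FLOWER_KEY_MPLS_OPTS
 * format is emitted.
 */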
2732 static int fl_dump_key_mpls(struct sk_buff *skb,
2733 struct flow_dissector_key_mpls *mpls_key,
2734 struct flow_dissector_key_mpls *mpls_mask)
2736 struct flow_dissector_mpls_lse *lse_mask;
2737 struct flow_dissector_mpls_lse *lse_key;
2740 if (!mpls_mask->used_lses)
2743 lse_mask = &mpls_mask->ls[0];
2744 lse_key = &mpls_key->ls[0];
2746 /* For backward compatibility, don't use the MPLS nested attributes if
2747 * the rule can be expressed using the old attributes. */
2749 if (mpls_mask->used_lses & ~1 ||
2750 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2751 !lse_mask->mpls_tc && !lse_mask->mpls_label))
2752 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2754 if (lse_mask->mpls_ttl) {
2755 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, lse_key->mpls_ttl);
2760 if (lse_mask->mpls_tc) {
2761 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, lse_key->mpls_tc);
2766 if (lse_mask->mpls_label) {
2767 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2768 lse_key->mpls_label);
2772 if (lse_mask->mpls_bos) {
2773 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, lse_key->mpls_bos);
2781 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2782 struct flow_dissector_key_ip *key,
2783 struct flow_dissector_key_ip *mask)
2785 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2786 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2787 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2788 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2790 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2791 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2797 static int fl_dump_key_vlan(struct sk_buff *skb,
2798 int vlan_id_key, int vlan_prio_key,
2799 struct flow_dissector_key_vlan *vlan_key,
2800 struct flow_dissector_key_vlan *vlan_mask)
2804 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2806 if (vlan_mask->vlan_id) {
2807 err = nla_put_u16(skb, vlan_id_key, vlan_key->vlan_id);
2812 if (vlan_mask->vlan_priority) {
2813 err = nla_put_u8(skb, vlan_prio_key,
2814 vlan_key->vlan_priority);
2821 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2822 u32 *flower_key, u32 *flower_mask,
2823 u32 flower_flag_bit, u32 dissector_flag_bit)
2825 if (dissector_mask & dissector_flag_bit) {
2826 *flower_mask |= flower_flag_bit;
2827 if (dissector_key & dissector_flag_bit)
2828 *flower_key |= flower_flag_bit;
2832 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2838 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2844 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2845 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2846 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2847 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2848 FLOW_DIS_FIRST_FRAG);
2850 _key = cpu_to_be32(key);
2851 _mask = cpu_to_be32(mask);
2853 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2857 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2860 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2861 struct flow_dissector_key_enc_opts *enc_opts)
2863 struct geneve_opt *opt;
2864 struct nlattr *nest;
2867 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2869 goto nla_put_failure;
2871 while (enc_opts->len > opt_off) {
2872 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2874 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, opt->opt_class))
2876 goto nla_put_failure;
2877 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, opt->type))
2879 goto nla_put_failure;
2880 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2881 opt->length * 4, opt->opt_data))
2882 goto nla_put_failure;
2884 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2886 nla_nest_end(skb, nest);
2890 nla_nest_cancel(skb, nest);
2894 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2895 struct flow_dissector_key_enc_opts *enc_opts)
2897 struct vxlan_metadata *md;
2898 struct nlattr *nest;
2900 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2902 goto nla_put_failure;
2904 md = (struct vxlan_metadata *)&enc_opts->data[0];
2905 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2906 goto nla_put_failure;
2908 nla_nest_end(skb, nest);
2912 nla_nest_cancel(skb, nest);
2916 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2917 struct flow_dissector_key_enc_opts *enc_opts)
2919 struct erspan_metadata *md;
2920 struct nlattr *nest;
2922 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2924 goto nla_put_failure;
2926 md = (struct erspan_metadata *)&enc_opts->data[0];
2927 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2928 goto nla_put_failure;
2930 if (md->version == 1 &&
2931 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2932 goto nla_put_failure;
2934 if (md->version == 2 &&
2935 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, md->u.md2.dir) ||
2937 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2938 get_hwid(&md->u.md2))))
2939 goto nla_put_failure;
2941 nla_nest_end(skb, nest);
2945 nla_nest_cancel(skb, nest);
2949 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2950 struct flow_dissector_key_enc_opts *enc_opts)
2953 struct gtp_pdu_session_info *session_info;
2954 struct nlattr *nest;
2956 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2958 goto nla_put_failure;
2960 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2962 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2963 session_info->pdu_type))
2964 goto nla_put_failure;
2966 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2967 goto nla_put_failure;
2969 nla_nest_end(skb, nest);
2973 nla_nest_cancel(skb, nest);
2977 static int fl_dump_key_ct(struct sk_buff *skb,
2978 struct flow_dissector_key_ct *key,
2979 struct flow_dissector_key_ct *mask)
2981 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2982 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2983 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2984 sizeof(key->ct_state)))
2985 goto nla_put_failure;
2987 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2988 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2989 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2990 sizeof(key->ct_zone)))
2991 goto nla_put_failure;
2993 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2994 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2995 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2996 sizeof(key->ct_mark)))
2997 goto nla_put_failure;
2999 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3000 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3001 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3002 sizeof(key->ct_labels)))
3003 goto nla_put_failure;
3011 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3012 struct flow_dissector_key_enc_opts *enc_opts)
3014 struct nlattr *nest;
3020 nest = nla_nest_start_noflag(skb, enc_opt_type);
3022 goto nla_put_failure;
3024 switch (enc_opts->dst_opt_type) {
3025 case TUNNEL_GENEVE_OPT:
3026 err = fl_dump_key_geneve_opt(skb, enc_opts);
3028 goto nla_put_failure;
3030 case TUNNEL_VXLAN_OPT:
3031 err = fl_dump_key_vxlan_opt(skb, enc_opts);
3033 goto nla_put_failure;
3035 case TUNNEL_ERSPAN_OPT:
3036 err = fl_dump_key_erspan_opt(skb, enc_opts);
3038 goto nla_put_failure;
3040 case TUNNEL_GTP_OPT:
3041 err = fl_dump_key_gtp_opt(skb, enc_opts);
3043 goto nla_put_failure;
3046 goto nla_put_failure;
3048 nla_nest_end(skb, nest);
3052 nla_nest_cancel(skb, nest);
3056 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3057 struct flow_dissector_key_enc_opts *key_opts,
3058 struct flow_dissector_key_enc_opts *msk_opts)
3062 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3066 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
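/* Dump all masked key fields of a filter (or template) into the netlink
 * message, mirroring the attribute layout parsed by fl_set_key().
 */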
3069 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3070 struct fl_flow_key *key, struct fl_flow_key *mask)
3072 if (mask->meta.ingress_ifindex) {
3073 struct net_device *dev;
3075 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3076 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3077 goto nla_put_failure;
3080 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3081 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3082 sizeof(key->eth.dst)) ||
3083 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3084 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3085 sizeof(key->eth.src)) ||
3086 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3087 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3088 sizeof(key->basic.n_proto)))
3089 goto nla_put_failure;
3091 if (mask->num_of_vlans.num_of_vlans) {
3092 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3093 goto nla_put_failure;
3096 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3097 goto nla_put_failure;
3099 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3100 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3101 goto nla_put_failure;
3103 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3104 TCA_FLOWER_KEY_CVLAN_PRIO,
3105 &key->cvlan, &mask->cvlan) ||
3106 (mask->cvlan.vlan_tpid &&
3107 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3108 key->cvlan.vlan_tpid)))
3109 goto nla_put_failure;
3111 if (mask->basic.n_proto) {
3112 if (mask->cvlan.vlan_eth_type) {
3113 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3114 key->basic.n_proto))
3115 goto nla_put_failure;
3116 } else if (mask->vlan.vlan_eth_type) {
3117 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3118 key->vlan.vlan_eth_type))
3119 goto nla_put_failure;
3123 if ((key->basic.n_proto == htons(ETH_P_IP) ||
3124 key->basic.n_proto == htons(ETH_P_IPV6)) &&
3125 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3126 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3127 sizeof(key->basic.ip_proto)) ||
3128 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3129 goto nla_put_failure;
3131 if (mask->pppoe.session_id) {
3132 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3133 key->pppoe.session_id))
3134 goto nla_put_failure;
3136 if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3137 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3138 key->pppoe.ppp_proto))
3139 goto nla_put_failure;
3142 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3143 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3144 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3145 sizeof(key->ipv4.src)) ||
3146 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3147 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3148 sizeof(key->ipv4.dst))))
3149 goto nla_put_failure;
3150 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3151 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3152 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3153 sizeof(key->ipv6.src)) ||
3154 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3155 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3156 sizeof(key->ipv6.dst))))
3157 goto nla_put_failure;
3159 if (key->basic.ip_proto == IPPROTO_TCP &&
3160 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3161 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3162 sizeof(key->tp.src)) ||
3163 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3164 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3165 sizeof(key->tp.dst)) ||
3166 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3167 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3168 sizeof(key->tcp.flags))))
3169 goto nla_put_failure;
3170 else if (key->basic.ip_proto == IPPROTO_UDP &&
3171 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3172 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3173 sizeof(key->tp.src)) ||
3174 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3175 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3176 sizeof(key->tp.dst))))
3177 goto nla_put_failure;
3178 else if (key->basic.ip_proto == IPPROTO_SCTP &&
3179 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3180 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3181 sizeof(key->tp.src)) ||
3182 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3183 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3184 sizeof(key->tp.dst))))
3185 goto nla_put_failure;
3186 else if (key->basic.n_proto == htons(ETH_P_IP) &&
3187 key->basic.ip_proto == IPPROTO_ICMP &&
3188 (fl_dump_key_val(skb, &key->icmp.type,
3189 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3190 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3191 sizeof(key->icmp.type)) ||
3192 fl_dump_key_val(skb, &key->icmp.code,
3193 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3194 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3195 sizeof(key->icmp.code))))
3196 goto nla_put_failure;
3197 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3198 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3199 (fl_dump_key_val(skb, &key->icmp.type,
3200 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3201 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3202 sizeof(key->icmp.type)) ||
3203 fl_dump_key_val(skb, &key->icmp.code,
3204 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3205 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3206 sizeof(key->icmp.code))))
3207 goto nla_put_failure;
3208 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3209 key->basic.n_proto == htons(ETH_P_RARP)) &&
3210 (fl_dump_key_val(skb, &key->arp.sip,
3211 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3212 TCA_FLOWER_KEY_ARP_SIP_MASK,
3213 sizeof(key->arp.sip)) ||
3214 fl_dump_key_val(skb, &key->arp.tip,
3215 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3216 TCA_FLOWER_KEY_ARP_TIP_MASK,
3217 sizeof(key->arp.tip)) ||
3218 fl_dump_key_val(skb, &key->arp.op,
3219 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3220 TCA_FLOWER_KEY_ARP_OP_MASK,
3221 sizeof(key->arp.op)) ||
3222 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3223 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3224 sizeof(key->arp.sha)) ||
3225 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3226 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3227 sizeof(key->arp.tha))))
3228 goto nla_put_failure;
3229 else if (key->basic.ip_proto == IPPROTO_L2TP &&
3230 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3231 TCA_FLOWER_KEY_L2TPV3_SID,
3232 &mask->l2tpv3.session_id, TCA_FLOWER_KEY_L2TPV3_SID_MASK,
3234 sizeof(key->l2tpv3.session_id)))
3235 goto nla_put_failure;
3237 if ((key->basic.ip_proto == IPPROTO_TCP ||
3238 key->basic.ip_proto == IPPROTO_UDP ||
3239 key->basic.ip_proto == IPPROTO_SCTP) &&
3240 fl_dump_key_port_range(skb, key, mask))
3241 goto nla_put_failure;
3243 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3244 (fl_dump_key_val(skb, &key->enc_ipv4.src,
3245 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3246 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3247 sizeof(key->enc_ipv4.src)) ||
3248 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3249 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3250 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3251 sizeof(key->enc_ipv4.dst))))
3252 goto nla_put_failure;
3253 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3254 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3255 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3256 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3257 sizeof(key->enc_ipv6.src)) ||
3258 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3259 TCA_FLOWER_KEY_ENC_IPV6_DST,
3260 &mask->enc_ipv6.dst,
3261 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3262 sizeof(key->enc_ipv6.dst))))
3263 goto nla_put_failure;
3265 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3266 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3267 sizeof(key->enc_key_id)) ||
3268 fl_dump_key_val(skb, &key->enc_tp.src,
3269 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, &mask->enc_tp.src,
3271 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3272 sizeof(key->enc_tp.src)) ||
3273 fl_dump_key_val(skb, &key->enc_tp.dst,
3274 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, &mask->enc_tp.dst,
3276 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3277 sizeof(key->enc_tp.dst)) ||
3278 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3279 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3280 goto nla_put_failure;
3282 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3283 goto nla_put_failure;
3285 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3286 goto nla_put_failure;
3288 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3289 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3290 sizeof(key->hash.hash)))
3291 goto nla_put_failure;
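/* Full dump of a single filter: classid, key/mask, flags, hardware state and
 * actions. Fields that may change concurrently are read under tp->lock.
 */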
3299 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3300 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3302 struct cls_fl_filter *f = fh;
3303 struct nlattr *nest;
3304 struct fl_flow_key *key, *mask;
3310 t->tcm_handle = f->handle;
3312 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3314 goto nla_put_failure;
3316 spin_lock(&tp->lock);
3318 if (f->res.classid &&
3319 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3320 goto nla_put_failure_locked;
3323 mask = &f->mask->key;
3324 skip_hw = tc_skip_hw(f->flags);
3326 if (fl_dump_key(skb, net, key, mask))
3327 goto nla_put_failure_locked;
3329 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3330 goto nla_put_failure_locked;
3332 spin_unlock(&tp->lock);
3335 fl_hw_update_stats(tp, f, rtnl_held);
3337 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3338 goto nla_put_failure;
3340 if (tcf_exts_dump(skb, &f->exts))
3341 goto nla_put_failure;
3343 nla_nest_end(skb, nest);
3345 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3346 goto nla_put_failure;
3350 nla_put_failure_locked:
3351 spin_unlock(&tp->lock);
3353 nla_nest_cancel(skb, nest);
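/* Terse dump of a single filter: only the handle, flags and a terse action
 * dump, used to speed up large dump requests.
 */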
3357 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3358 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3360 struct cls_fl_filter *f = fh;
3361 struct nlattr *nest;
3367 t->tcm_handle = f->handle;
3369 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3371 goto nla_put_failure;
3373 spin_lock(&tp->lock);
3375 skip_hw = tc_skip_hw(f->flags);
3377 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3378 goto nla_put_failure_locked;
3380 spin_unlock(&tp->lock);
3383 fl_hw_update_stats(tp, f, rtnl_held);
3385 if (tcf_exts_terse_dump(skb, &f->exts))
3386 goto nla_put_failure;
3388 nla_nest_end(skb, nest);
3392 nla_put_failure_locked:
3393 spin_unlock(&tp->lock);
3395 nla_nest_cancel(skb, nest);
3399 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3401 struct fl_flow_tmplt *tmplt = tmplt_priv;
3402 struct fl_flow_key *key, *mask;
3403 struct nlattr *nest;
3405 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3407 goto nla_put_failure;
3409 key = &tmplt->dummy_key;
3410 mask = &tmplt->mask;
3412 if (fl_dump_key(skb, net, key, mask))
3413 goto nla_put_failure;
3415 nla_nest_end(skb, nest);
3420 nla_nest_cancel(skb, nest);
3424 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3427 struct cls_fl_filter *f = fh;
3429 tc_cls_bind_class(classid, cl, q, &f->res, base);
3432 static bool fl_delete_empty(struct tcf_proto *tp)
3434 struct cls_fl_head *head = fl_head_dereference(tp);
3436 spin_lock(&tp->lock);
3437 tp->deleting = idr_is_empty(&head->handle_idr);
3438 spin_unlock(&tp->lock);
3440 return tp->deleting;
3443 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3445 .classify = fl_classify,
3447 .destroy = fl_destroy,
3450 .change = fl_change,
3451 .delete = fl_delete,
3452 .delete_empty = fl_delete_empty,
3454 .reoffload = fl_reoffload,
3455 .hw_add = fl_hw_add,
3456 .hw_del = fl_hw_del,
3458 .terse_dump = fl_terse_dump,
3459 .bind_class = fl_bind_class,
3460 .tmplt_create = fl_tmplt_create,
3461 .tmplt_destroy = fl_tmplt_destroy,
3462 .tmplt_dump = fl_tmplt_dump,
3463 .get_exts = fl_get_exts,
3464 .owner = THIS_MODULE,
3465 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
3468 static int __init cls_fl_init(void)
3470 return register_tcf_proto_ops(&cls_fl_ops);
3473 static void __exit cls_fl_exit(void)
3475 unregister_tcf_proto_ops(&cls_fl_ops);
3478 module_init(cls_fl_init);
3479 module_exit(cls_fl_exit);
3481 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3482 MODULE_DESCRIPTION("Flower classifier");
3483 MODULE_LICENSE("GPL v2");