1 #ifndef _NET_FLOW_OFFLOAD_H
2 #define _NET_FLOW_OFFLOAD_H
4 #include <linux/kernel.h>
5 #include <linux/list.h>
6 #include <linux/netlink.h>
7 #include <net/flow_dissector.h>
8 #include <linux/rhashtable.h>
/* struct flow_match wraps the dissector used to parse a rule's match keys.
 * Each flow_match_* struct below pairs a dissector key with its mask, as
 * filled in by the corresponding flow_rule_match_*() helper.
 * NOTE(review): closing braces and some members are elided in this dump.
 */
11 struct flow_dissector *dissector;
16 struct flow_match_meta {
17 struct flow_dissector_key_meta *key, *mask;
20 struct flow_match_basic {
21 struct flow_dissector_key_basic *key, *mask;
24 struct flow_match_control {
25 struct flow_dissector_key_control *key, *mask;
28 struct flow_match_eth_addrs {
29 struct flow_dissector_key_eth_addrs *key, *mask;
32 struct flow_match_vlan {
33 struct flow_dissector_key_vlan *key, *mask;
36 struct flow_match_ipv4_addrs {
37 struct flow_dissector_key_ipv4_addrs *key, *mask;
40 struct flow_match_ipv6_addrs {
41 struct flow_dissector_key_ipv6_addrs *key, *mask;
44 struct flow_match_ip {
45 struct flow_dissector_key_ip *key, *mask;
48 struct flow_match_ports {
49 struct flow_dissector_key_ports *key, *mask;
52 struct flow_match_icmp {
53 struct flow_dissector_key_icmp *key, *mask;
56 struct flow_match_tcp {
57 struct flow_dissector_key_tcp *key, *mask;
60 struct flow_match_mpls {
61 struct flow_dissector_key_mpls *key, *mask;
64 struct flow_match_enc_keyid {
65 struct flow_dissector_key_keyid *key, *mask;
68 struct flow_match_enc_opts {
69 struct flow_dissector_key_enc_opts *key, *mask;
72 struct flow_match_ct {
73 struct flow_dissector_key_ct *key, *mask;
/* Extract the key/mask pair of the named kind from @rule into @out.
 * The enc_* variants operate on the tunnel (encapsulation) keys.
 */
78 void flow_rule_match_meta(const struct flow_rule *rule,
79 struct flow_match_meta *out);
80 void flow_rule_match_basic(const struct flow_rule *rule,
81 struct flow_match_basic *out);
82 void flow_rule_match_control(const struct flow_rule *rule,
83 struct flow_match_control *out);
84 void flow_rule_match_eth_addrs(const struct flow_rule *rule,
85 struct flow_match_eth_addrs *out);
86 void flow_rule_match_vlan(const struct flow_rule *rule,
87 struct flow_match_vlan *out);
88 void flow_rule_match_cvlan(const struct flow_rule *rule,
89 struct flow_match_vlan *out);
90 void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
91 struct flow_match_ipv4_addrs *out);
92 void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
93 struct flow_match_ipv6_addrs *out);
94 void flow_rule_match_ip(const struct flow_rule *rule,
95 struct flow_match_ip *out);
96 void flow_rule_match_ports(const struct flow_rule *rule,
97 struct flow_match_ports *out);
98 void flow_rule_match_tcp(const struct flow_rule *rule,
99 struct flow_match_tcp *out);
100 void flow_rule_match_icmp(const struct flow_rule *rule,
101 struct flow_match_icmp *out);
102 void flow_rule_match_mpls(const struct flow_rule *rule,
103 struct flow_match_mpls *out);
104 void flow_rule_match_enc_control(const struct flow_rule *rule,
105 struct flow_match_control *out);
106 void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
107 struct flow_match_ipv4_addrs *out);
108 void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
109 struct flow_match_ipv6_addrs *out);
110 void flow_rule_match_enc_ip(const struct flow_rule *rule,
111 struct flow_match_ip *out);
112 void flow_rule_match_enc_ports(const struct flow_rule *rule,
113 struct flow_match_ports *out);
114 void flow_rule_match_enc_keyid(const struct flow_rule *rule,
115 struct flow_match_enc_keyid *out);
116 void flow_rule_match_enc_opts(const struct flow_rule *rule,
117 struct flow_match_enc_opts *out);
118 void flow_rule_match_ct(const struct flow_rule *rule,
119 struct flow_match_ct *out);
/* Kinds of hardware-offloadable actions, mirroring the tc action set.
 * NOTE(review): several enumerators (e.g. DROP/TRAP/GOTO, CSUM, MARK,
 * QUEUE, SAMPLE, POLICE, CT, PTYPE) are elided in this dump — confirm
 * against the full header before relying on enumerator values.
 */
121 enum flow_action_id {
122 FLOW_ACTION_ACCEPT = 0,
126 FLOW_ACTION_REDIRECT,
128 FLOW_ACTION_REDIRECT_INGRESS,
129 FLOW_ACTION_MIRRED_INGRESS,
130 FLOW_ACTION_VLAN_PUSH,
131 FLOW_ACTION_VLAN_POP,
132 FLOW_ACTION_VLAN_MANGLE,
133 FLOW_ACTION_TUNNEL_ENCAP,
134 FLOW_ACTION_TUNNEL_DECAP,
145 FLOW_ACTION_CT_METADATA,
146 FLOW_ACTION_MPLS_PUSH,
147 FLOW_ACTION_MPLS_POP,
148 FLOW_ACTION_MPLS_MANGLE,
152 /* This mirrors the enum pedit_header_type definition for easy mapping from
153  * tc pedit actions. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
154  * FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
156 enum flow_action_mangle_base {
157 FLOW_ACT_MANGLE_UNSPEC = 0,
158 FLOW_ACT_MANGLE_HDR_TYPE_ETH,
159 FLOW_ACT_MANGLE_HDR_TYPE_IP4,
160 FLOW_ACT_MANGLE_HDR_TYPE_IP6,
161 FLOW_ACT_MANGLE_HDR_TYPE_TCP,
162 FLOW_ACT_MANGLE_HDR_TYPE_UDP,
/* Bit numbers for the supported HW stats collection strategies. */
165 enum flow_action_hw_stats_type_bit {
166 FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE_BIT,
167 FLOW_ACTION_HW_STATS_TYPE_DELAYED_BIT,
/* Bitmask values built from the bits above; ANY accepts either strategy. */
170 enum flow_action_hw_stats_type {
171 FLOW_ACTION_HW_STATS_TYPE_DISABLED = 0,
172 FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE =
173 BIT(FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE_BIT),
174 FLOW_ACTION_HW_STATS_TYPE_DELAYED =
175 BIT(FLOW_ACTION_HW_STATS_TYPE_DELAYED_BIT),
176 FLOW_ACTION_HW_STATS_TYPE_ANY =
177 FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE |
178 FLOW_ACTION_HW_STATS_TYPE_DELAYED,
/* Destructor invoked when an action entry's private state is released. */
181 typedef void (*action_destr)(void *priv);
183 struct flow_action_cookie {
188 struct flow_action_cookie *flow_action_cookie_create(void *data,
191 void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
/* One offloaded action. The per-id payload fields below appear to live in
 * an anonymous union keyed by @id (see the FLOW_ACTION_* tags); the union
 * wrapper is elided in this dump — NOTE(review): confirm in the full header.
 */
193 struct flow_action_entry {
194 enum flow_action_id id;
195 enum flow_action_hw_stats_type hw_stats_type;
196 action_destr destructor;
197 void *destructor_priv;
199 u32 chain_index; /* FLOW_ACTION_GOTO */
200 struct net_device *dev; /* FLOW_ACTION_REDIRECT */
201 struct { /* FLOW_ACTION_VLAN */
206 struct { /* FLOW_ACTION_PACKET_EDIT */
207 enum flow_action_mangle_base htype;
212 struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */
213 u32 csum_flags; /* FLOW_ACTION_CSUM */
214 u32 mark; /* FLOW_ACTION_MARK */
215 u16 ptype; /* FLOW_ACTION_PTYPE */
216 struct { /* FLOW_ACTION_QUEUE */
221 struct { /* FLOW_ACTION_SAMPLE */
222 struct psample_group *psample_group;
227 struct { /* FLOW_ACTION_POLICE */
231 struct { /* FLOW_ACTION_CT */
234 struct nf_flowtable *flow_table;
237 unsigned long cookie;
241 struct { /* FLOW_ACTION_MPLS_PUSH */
248 struct { /* FLOW_ACTION_MPLS_POP */
251 struct { /* FLOW_ACTION_MPLS_MANGLE */
258 struct flow_action_cookie *cookie; /* user defined action cookie */
/* Action list; entries[] is a flexible array of num_entries items. */
262 unsigned int num_entries;
263 struct flow_action_entry entries[];
/* True if the action list contains at least one entry. */
266 static inline bool flow_action_has_entries(const struct flow_action *action)
268 return action->num_entries;
272 * flow_offload_has_one_action() - check if exactly one action is present
273 * @action: tc filter flow offload action
275 * Returns true if exactly one action is present.
277 static inline bool flow_offload_has_one_action(const struct flow_action *action)
279 return action->num_entries == 1;
/* Iterate __act over every entry of __actions, with __i as the index. */
282 #define flow_action_for_each(__i, __act, __actions) \
283 for (__i = 0, __act = &(__actions)->entries[0]; \
284 __i < (__actions)->num_entries; \
285 __act = &(__actions)->entries[++__i])
/* Reject action lists that mix different HW stats types; lists with a
 * single entry trivially pass.
 */
288 flow_action_mixed_hw_stats_types_check(const struct flow_action *action,
289 struct netlink_ext_ack *extack)
291 const struct flow_action_entry *action_entry;
/* NOTE(review): uninitialized_var() was removed from the kernel because it
 * suppresses legitimate "maybe-uninitialized" warnings; a plain initializer
 * is preferred upstream.
 */
292 u8 uninitialized_var(last_hw_stats_type);
295 if (flow_offload_has_one_action(action))
298 flow_action_for_each(i, action_entry, action) {
299 if (i && action_entry->hw_stats_type != last_hw_stats_type) {
300 NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
303 last_hw_stats_type = action_entry->hw_stats_type;
/* Return the first entry of the list; warns if the list is empty. */
308 static inline const struct flow_action_entry *
309 flow_action_first_entry_get(const struct flow_action *action)
311 WARN_ON(!flow_action_has_entries(action));
312 return &action->entries[0];
/* Common HW stats validation: no mixing, then either require the default
 * "any" type (!check_allow_bit) or require that @allow_bit is set in the
 * requested stats type mask.
 */
316 __flow_action_hw_stats_types_check(const struct flow_action *action,
317 struct netlink_ext_ack *extack,
318 bool check_allow_bit,
319 enum flow_action_hw_stats_type_bit allow_bit)
321 const struct flow_action_entry *action_entry;
323 if (!flow_action_has_entries(action))
325 if (!flow_action_mixed_hw_stats_types_check(action, extack))
327 action_entry = flow_action_first_entry_get(action);
328 if (!check_allow_bit &&
329 action_entry->hw_stats_type != FLOW_ACTION_HW_STATS_TYPE_ANY) {
330 NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
332 } else if (check_allow_bit &&
333 !(action_entry->hw_stats_type & BIT(allow_bit))) {
334 NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
/* Driver supports exactly one stats strategy, identified by @allow_bit. */
341 flow_action_hw_stats_types_check(const struct flow_action *action,
342 struct netlink_ext_ack *extack,
343 enum flow_action_hw_stats_type_bit allow_bit)
345 return __flow_action_hw_stats_types_check(action, extack,
/* Driver supports only the default "any" stats type. */
350 flow_action_basic_hw_stats_types_check(const struct flow_action *action,
351 struct netlink_ext_ack *extack)
353 return __flow_action_hw_stats_types_check(action, extack, false, 0);
/* A flow rule: the match (dissector keys/masks) plus the action list. */
357 struct flow_match match;
358 struct flow_action action;
361 struct flow_rule *flow_rule_alloc(unsigned int num_actions);
/* True if @rule's dissector uses the given flow dissector key id. */
363 static inline bool flow_rule_match_key(const struct flow_rule *rule,
364 enum flow_dissector_key_id key)
366 return dissector_uses_key(rule->match.dissector, key);
/* Accumulate byte/packet counters and keep the most recent last-used time. */
375 static inline void flow_stats_update(struct flow_stats *flow_stats,
376 u64 bytes, u64 pkts, u64 lastused)
378 flow_stats->pkts += pkts;
379 flow_stats->bytes += bytes;
380 flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
383 enum flow_block_command {
/* Where a block is bound: clsact ingress or egress qdisc. */
388 enum flow_block_binder_type {
389 FLOW_BLOCK_BINDER_TYPE_UNSPEC,
390 FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
391 FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
395 struct list_head cb_list;
398 struct netlink_ext_ack;
/* Parameters of a block bind/unbind request handed down to drivers. */
400 struct flow_block_offload {
401 enum flow_block_command command;
402 enum flow_block_binder_type binder_type;
404 bool unlocked_driver_cb;
406 struct flow_block *block;
407 struct list_head cb_list;
408 struct list_head *driver_block_list;
409 struct netlink_ext_ack *extack;
/* Driver callback invoked for each setup request on a bound block. */
413 typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
416 struct flow_block_cb {
417 struct list_head driver_list;
418 struct list_head list;
422 void (*release)(void *cb_priv);
426 struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
427 void *cb_ident, void *cb_priv,
428 void (*release)(void *cb_priv));
429 void flow_block_cb_free(struct flow_block_cb *block_cb);
431 struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
432 flow_setup_cb_t *cb, void *cb_ident);
434 void *flow_block_cb_priv(struct flow_block_cb *block_cb);
435 void flow_block_cb_incref(struct flow_block_cb *block_cb);
436 unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);
/* Queue @block_cb on the offload request's cb_list (bind path). */
438 static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
439 struct flow_block_offload *offload)
441 list_add_tail(&block_cb->list, &offload->cb_list);
/* Move @block_cb from its current list onto the request's cb_list
 * (unbind path) — list_move, not list_add, since it is already linked.
 */
444 static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
445 struct flow_block_offload *offload)
447 list_move(&block_cb->list, &offload->cb_list);
450 bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
451 struct list_head *driver_block_list);
453 int flow_block_cb_setup_simple(struct flow_block_offload *f,
454 struct list_head *driver_list,
456 void *cb_ident, void *cb_priv, bool ingress_only);
458 enum flow_cls_command {
462 FLOW_CLS_TMPLT_CREATE,
463 FLOW_CLS_TMPLT_DESTROY,
466 struct flow_cls_common_offload {
470 struct netlink_ext_ack *extack;
/* A classifier offload request: command plus rule, keyed by @cookie. */
473 struct flow_cls_offload {
474 struct flow_cls_common_offload common;
475 enum flow_cls_command command;
476 unsigned long cookie;
477 struct flow_rule *rule;
478 struct flow_stats stats;
/* Accessor for the flow rule carried by a classifier offload request. */
482 static inline struct flow_rule *
483 flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
485 return flow_cmd->rule;
/* Initialize an empty flow block (its callback list). */
488 static inline void flow_block_init(struct flow_block *flow_block)
490 INIT_LIST_HEAD(&flow_block->cb_list);
/* Indirect block offload: lets a driver receive block bind events for
 * devices it does not directly own (e.g. tunnel netdevices).
 */
493 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
494 enum tc_setup_type type, void *type_data);
496 typedef void flow_indr_block_cmd_t(struct net_device *dev,
497 flow_indr_block_bind_cb_t *cb, void *cb_priv,
498 enum flow_block_command command);
500 struct flow_indr_block_entry {
501 flow_indr_block_cmd_t *cb;
502 struct list_head list;
505 void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);
507 void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
509 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
510 flow_indr_block_bind_cb_t *cb,
513 void __flow_indr_block_cb_unregister(struct net_device *dev,
514 flow_indr_block_bind_cb_t *cb,
517 int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
518 flow_indr_block_bind_cb_t *cb, void *cb_ident);
520 void flow_indr_block_cb_unregister(struct net_device *dev,
521 flow_indr_block_bind_cb_t *cb,
524 void flow_indr_block_call(struct net_device *dev,
525 struct flow_block_offload *bo,
526 enum flow_block_command command);
528 #endif /* _NET_FLOW_OFFLOAD_H */