/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef __MLX5_EN_TC_H__
34 #define __MLX5_EN_TC_H__
36 #include <net/pkt_cls.h>
40 #include "en/tc_tun.h"
41 #include "en/tc/int_port.h"
44 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
46 #ifdef CONFIG_MLX5_ESWITCH
48 #define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
49 sizeof(struct mlx5_nic_flow_attr))
50 #define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
51 sizeof(struct mlx5_esw_flow_attr))
52 #define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
56 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
/* NOTE(review): this chunk is a lossy extraction — original line numbers are
 * fused into each line and interior lines are missing, so the aggregates
 * below appear truncated (no closing braces, dropped fields). Comments
 * describe only what the visible lines establish; confirm everything against
 * the complete header before relying on it.
 */
/* State carried from TC offload into the RX datapath so a packet that missed
 * (or partially matched) in hardware can be restored/forwarded in software.
 */
58 struct mlx5e_tc_update_priv {
59 struct net_device *fwd_dev; /* device to forward the restored packet to — TODO confirm */
/* NIC-namespace-specific tail of struct mlx5_flow_attr (see the
 * "keep this union last" trailing storage below). */
62 struct mlx5_nic_flow_attr {
65 struct mlx5_flow_table *hairpin_ft; /* hairpin destination flow table */
/* Common attribute describing one offloaded flow rule. Allocated together
 * with namespace-specific trailing storage — see NIC_FLOW_ATTR_SZ /
 * ESW_FLOW_ATTR_SZ and ns_to_attr_sz() above, and mlx5_alloc_flow_attr(). */
68 struct mlx5_flow_attr {
70 struct mlx5_fc *counter; /* HW flow counter backing rule stats */
71 struct mlx5_modify_hdr *modify_hdr; /* header-rewrite (modify header) actions object */
72 struct mlx5_ct_attr ct_attr; /* connection-tracking offload state */
73 struct mlx5e_sample_attr sample_attr; /* sample-action offload state */
74 struct mlx5e_tc_flow_parse_attr *parse_attr; /* parsed TC filter data */
78 struct mlx5_flow_table *ft; /* presumably the table the rule lives in — TODO confirm */
79 struct mlx5_flow_table *dest_ft; /* forward-to-table destination */
84 int tunnel_id; /* mapped tunnel id */
86 struct list_head list;
87 struct mlx5e_post_act_handle *post_act_handle;
89 /* Indicate whether the parsed flow should be counted for lag mode decision */
94 /* keep this union last */
/* Namespace-specific trailing storage (FDB vs NIC); exactly one is used per
 * attr, sized by the *_FLOW_ATTR_SZ macros. NOTE(review): C99 flexible array
 * members ([]) are preferred over the GNU [0] form. */
96 struct mlx5_esw_flow_attr esw_attr[0];
97 struct mlx5_nic_flow_attr nic_attr[0];
/* Flag values for mlx5_flow_attr.flags. NOTE(review): the surrounding
 * `enum { ... };` wrapper was dropped by the extraction.
 * SLOW_PATH and ACCEPT feed mlx5e_tc_attr_flags_skip() below. */
102 MLX5_ATTR_FLAG_VLAN_HANDLED = BIT(0),
103 MLX5_ATTR_FLAG_SLOW_PATH = BIT(1),
104 MLX5_ATTR_FLAG_NO_IN_PORT = BIT(2),
105 MLX5_ATTR_FLAG_SRC_REWRITE = BIT(3),
106 MLX5_ATTR_FLAG_SAMPLE = BIT(4),
107 MLX5_ATTR_FLAG_ACCEPT = BIT(5),
108 MLX5_ATTR_FLAG_CT = BIT(6),
111 /* Returns true if any of the flags that require skipping further TC/NF processing are set. */
/* NOTE(review): the `static inline bool` line and the function braces were
 * dropped by the extraction; only the name and return expression remain. */
113 mlx5e_tc_attr_flags_skip(u32 attr_flags)
115 return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
/* Tunnel attributes recovered for a received packet (post-decap restore).
 * src_ip/dst_ip are presumably anonymous unions of v4/v6 addresses — the
 * interior lines were dropped by the extraction; TODO confirm. */
118 struct mlx5_rx_tun_attr {
123 } src_ip; /* Valid if decap_vport is not zero */
127 } dst_ip; /* Valid if decap_vport is not zero */
131 #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
132 #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
134 #define MLX5E_TC_MAX_INT_PORT_NUM (8)
136 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
/* Flattened copy of the tunnel-related flow-dissector match keys; used as
 * the key when mapping a tunnel match to a tunnel_id (see the
 * TUNNEL_INFO_BITS / ENC_OPTS_BITS mapping comment below).
 * NOTE(review): enc_ipv4/enc_ipv6 are presumably wrapped in a union keyed by
 * enc_control.addr_type — the wrapper lines were dropped; confirm. */
138 struct tunnel_match_key {
139 struct flow_dissector_key_control enc_control;
140 struct flow_dissector_key_keyid enc_key_id;
141 struct flow_dissector_key_ports enc_tp;
142 struct flow_dissector_key_ip enc_ip;
144 struct flow_dissector_key_ipv4_addrs enc_ipv4;
145 struct flow_dissector_key_ipv6_addrs enc_ipv6;
/* Tunnel encapsulation options: matched value plus its mask. */
151 struct tunnel_match_enc_opts {
152 struct flow_dissector_key_enc_opts key;
153 struct flow_dissector_key_enc_opts mask;
156 /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
157 * Upper TUNNEL_INFO_BITS for general tunnel info.
158 * Lower ENC_OPTS_BITS bits for enc_opts.
 */
160 #define TUNNEL_INFO_BITS 12
161 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
162 #define ENC_OPTS_BITS 11
163 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
164 #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
165 #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
/* Bit numbers for per-flow flags; consumed via the MLX5_TC_FLAG() helper
 * below. NOTE(review): the surrounding `enum { ... };` wrapper was dropped
 * by the extraction. */
168 MLX5E_TC_FLAG_INGRESS_BIT,
169 MLX5E_TC_FLAG_EGRESS_BIT,
170 MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
171 MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
172 MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
/* Marker: highest bit exposed outside this module. */
173 MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
176 #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
/* Eswitch/uplink-representor TC state setup and teardown. */
178 int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
179 void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
/* Lifetime of the flow rhashtable. */
181 int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
182 void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
/* TC flower classifier offload entry points: add, delete, and query stats
 * for one filter; `flags` selects ingress/egress and offload type
 * (see MLX5E_TC_FLAG_* above). */
184 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
185 struct flow_cls_offload *f, unsigned long flags);
186 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
187 struct flow_cls_offload *f, unsigned long flags);
189 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
190 struct flow_cls_offload *f, unsigned long flags);
/* TC matchall classifier offload entry points. */
192 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
193 struct tc_cls_matchall_offload *f);
194 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
195 struct tc_cls_matchall_offload *f);
196 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
197 struct tc_cls_matchall_offload *ma);
199 struct mlx5e_encap_entry;
/* Re-attach (add) or detach (del) the offloaded flows on `flow_list` that
 * use tunnel-encap entry `e` — e.g. when the neighbour it depends on becomes
 * valid or goes away. */
200 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
201 struct mlx5e_encap_entry *e,
202 struct list_head *flow_list);
203 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
204 struct mlx5e_encap_entry *e,
205 struct list_head *flow_list);
/* Reference counting on encap entries; take returns bool — presumably false
 * when the entry is already being destroyed (TODO confirm). */
206 bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
207 void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
/* Collect references to all flows using `e` onto `flow_list`, and release a
 * previously collected list. */
209 void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
210 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
212 struct mlx5e_neigh_hash_entry;
/* Iterate the encap entries hanging off neighbour entry `nhe`; pass the
 * previous entry (or NULL — TODO confirm) to get the next one. */
213 struct mlx5e_encap_entry *
214 mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
215 struct mlx5e_encap_entry *e);
/* Refresh the neighbour's "recently used" state from flow counters. */
216 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
/* Work item: re-offload flows (deferred to workqueue context). */
218 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
/* Metadata registers used to carry TC state (e.g. CT zone restore on the
 * NIC path) through hardware so it can be recovered on RX.
 * NOTE(review): only one enumerator is visible — the others were dropped by
 * the extraction. */
220 enum mlx5e_tc_attr_to_reg {
231 NIC_ZONE_RESTORE_TO_REG,
/* Per-register descriptor: where and how wide to rewrite, and where in the
 * flow spec to match it back. */
234 struct mlx5e_tc_attr_to_reg_mapping {
235 int mfield; /* rewrite field */
236 int moffset; /* bit offset of mfield */
237 int mlen; /* bits to rewrite/match */
239 int soffset; /* byte offset of spec for match */
/* Table indexed by enum mlx5e_tc_attr_to_reg; defined in the matching .c file. */
242 extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
244 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
245 struct net_device *out_dev);
247 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
248 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
249 enum mlx5_flow_namespace_type ns,
250 enum mlx5e_tc_attr_to_reg type,
253 void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
254 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
255 enum mlx5e_tc_attr_to_reg type,
256 int act_id, u32 data);
258 void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
259 enum mlx5e_tc_attr_to_reg type,
263 void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
264 enum mlx5e_tc_attr_to_reg type,
268 int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
269 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
270 enum mlx5_flow_namespace_type ns,
271 enum mlx5e_tc_attr_to_reg type,
274 int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
275 struct mlx5e_tc_flow *flow,
276 struct mlx5_flow_attr *attr);
278 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
279 struct flow_match_basic *match, bool outer,
280 void *headers_c, void *headers_v);
282 int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
283 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
285 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
288 struct mlx5_flow_handle *
289 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
290 struct mlx5_flow_spec *spec,
291 struct mlx5_flow_attr *attr);
292 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
293 struct mlx5_flow_handle *rule,
294 struct mlx5_flow_attr *attr);
296 struct mlx5_flow_handle *
297 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
298 struct mlx5_flow_spec *spec,
299 struct mlx5_flow_attr *attr);
301 mlx5_tc_rule_delete(struct mlx5e_priv *priv,
302 struct mlx5_flow_handle *rule,
303 struct mlx5_flow_attr *attr);
305 bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
306 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
309 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
310 struct mlx5_flow_attr *attr,
312 enum mlx5e_tc_int_port_type type,
315 #else /* CONFIG_MLX5_CLS_ACT */
316 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
317 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
318 static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
319 static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
321 mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
322 { return -EOPNOTSUPP; }
324 #endif /* CONFIG_MLX5_CLS_ACT */
326 struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
328 struct mlx5_flow_handle *
329 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
330 struct mlx5_flow_spec *spec,
331 struct mlx5_flow_attr *attr);
332 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
333 struct mlx5_flow_handle *rule,
334 struct mlx5_flow_attr *attr);
336 #else /* CONFIG_MLX5_ESWITCH */
337 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
338 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
339 static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
346 mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
347 { return -EOPNOTSUPP; }
350 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
351 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
353 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
356 reg_b = be32_to_cpu(cqe->ft_metadata);
358 if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
361 chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
369 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
370 #else /* CONFIG_MLX5_CLS_ACT */
371 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
374 mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
378 #endif /* __MLX5_EN_TC_H__ */