/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b, which is passed up to SW, since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
/* To avoid a false lock dependency warning, set the tc_ht lock class
 * apart from the lock class of the hash table it can nest with: when
 * the last flow is deleted from a group and the group is then deleted,
 * we get into del_sw_flow_group(), which calls rhashtable_destroy() on
 * fg->ftes_hash. That takes ht->mutex, but it is a different mutex
 * than the tc_ht one here.
 */
static struct lock_class_key tc_ht_lock_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
	data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *data,
				u32 *mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(mask, fmask, match_len);
	memcpy(data, fval, match_len);

	*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
	*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
}
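/* Usage sketch (illustrative, not from the original source): with a
 * mapping where mlen == 2, a value such as 0x1234 is byte-swapped to
 * network order and shifted so that exactly two bytes land at soffset
 * in the match param, i.e. the register value is matched big-endian.
 * mlx5e_tc_match_to_reg_get_match() applies the inverse shift and swap
 * when reading the value back out of the spec.
 */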
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has a 5-bit length field and 0 means 32 bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);

	/* Firmware has a 5-bit length field and 0 means 32 bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
}
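/* Usage sketch (illustrative, not from the original source): callers
 * that need to rewrite a register value later keep the returned id:
 *
 *	act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, acts, ns,
 *						      TUNNEL_TO_REG, old);
 *	...
 *	mlx5e_tc_match_to_reg_mod_hdr_change(mdev, acts, TUNNEL_TO_REG,
 *					     act_id, new);
 *
 * i.e. _change() patches the existing set_action_in entry in place
 * instead of appending a new one.
 */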
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when the dead peer update
	 * event function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  get_flow_name_space(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 *indirection_rqt, rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
	if (!indirection_rqt)
		return -ENOMEM;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}

	kfree(indirection_rqt);
	return 0;
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
	if (err)
		goto out;

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

out:
	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}

	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
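/* Example (added for clarity): peer_vhca_id 0x0005 and prio 3 hash to
 * 0x00050003, so entries are bucketed per (peer device, PCP prio) pair.
 */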
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8
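/* Note (added for clarity): VLAN PCP is a 3-bit field, so matched
 * priorities span 0..7; 8 is deliberately out of range and marks
 * "no priority matched on".
 */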
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set one hairpin pair per 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

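	/* Worked example (added for clarity): a 100Gb/s port gives
	 * link_speed = 100000, so 100000 / 50000 = 2 hairpin channels;
	 * links at or below 50Gb/s get a single channel.
	 */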
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = priv->fs.vlan.ft.t;
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_fc *counter = NULL;
	int err;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (flow_flag_test(flow, CT))
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);

	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
					       flow, spec, attr,
					       mod_hdr_acts);
	} else {
		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	}

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			if (flow_flag_test(flow, CT))
				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
			else
				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		goto offload_rule_0;

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
		return;
	}

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

offload_rule_0:
	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	return err;
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow_parse_attr *parse_attr,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   get_flow_name_space(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(flow->attr->modify_hdr);
	flow->attr->modify_hdr = mod_hdr;

	return 0;
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool vf_tun = false, encap_valid = true;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			goto err_out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			continue;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw_attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_out;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have a valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun = false;
	int out_index;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			continue;
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	}

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}
	kvfree(attr->parse_attr);
	kvfree(attr->esw_attr->rx_tun_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	return flow->attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
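/* Accepted mask shapes (added for clarity): an all-zero enc_opts mask
 * means "don't care". Once any bit is set, the option header must be
 * fully masked (opt_class == 0xffff and type == 0xff); partially
 * masked option headers are rejected for chain > 0 rules.
 */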
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
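/* Layout sketch (added for clarity): the 32-bit tunnel id packs the
 * tunnel mapping id in the high bits and the enc_opts mapping id in
 * the low ENC_OPTS_BITS, i.e.
 *
 *	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
 *
 * When no options were mapped, the enc_opts bits are excluded from the
 * register match by the reduced mask.
 */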
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
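/* Example (added for clarity): a rule matching n_proto == ETH_P_IP with
 * a full 0xffff mask is offloaded as ip_version == 4 when the device
 * reports the ip_version field capability; otherwise it is offloaded
 * as an exact ethertype == 0x0800 match.
 */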
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* If the ip_version field wasn't matched on, derive it from the ethertype. */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->attr->chain;
	sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat,
		 * so we don't set the DECAP flow action in that case.
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields. However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				      misc_parameters_3);
	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				      misc_parameters_3);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	match_level = outer_match_level;
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
			   dissector->used_keys);
		return -EOPNOTSUPP;
	}
	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}
2173 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2174 struct flow_match_basic match;
2176 flow_rule_match_basic(rule, &match);
2177 ip_proto = match.key->ip_proto;
2179 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2180 match.mask->ip_proto);
2181 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2182 match.key->ip_proto);
2184 if (match.mask->ip_proto)
2185			*match_level = MLX5_MATCH_L3;
2186	}
2187
2188 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2189 struct flow_match_ipv4_addrs match;
2191 flow_rule_match_ipv4_addrs(rule, &match);
2192 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2193 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2194 &match.mask->src, sizeof(match.mask->src));
2195 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2196 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2197 &match.key->src, sizeof(match.key->src));
2198 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2199 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2200 &match.mask->dst, sizeof(match.mask->dst));
2201 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2202 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2203 &match.key->dst, sizeof(match.key->dst));
2205 if (match.mask->src || match.mask->dst)
2206			*match_level = MLX5_MATCH_L3;
2207	}
2208
2209 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2210 struct flow_match_ipv6_addrs match;
2212 flow_rule_match_ipv6_addrs(rule, &match);
2213 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2214 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2215 &match.mask->src, sizeof(match.mask->src));
2216 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2217 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2218 &match.key->src, sizeof(match.key->src));
2220 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2221 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2222 &match.mask->dst, sizeof(match.mask->dst));
2223 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2224 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2225 &match.key->dst, sizeof(match.key->dst));
2227 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2228 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2229			*match_level = MLX5_MATCH_L3;
2230	}
2231
2232 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2233 struct flow_match_ip match;
2235 flow_rule_match_ip(rule, &match);
2236 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2237 match.mask->tos & 0x3);
2238 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2239 match.key->tos & 0x3);
2241 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2242 match.mask->tos >> 2);
2243 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2244 match.key->tos >> 2);
2246		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2247			 match.mask->ttl);
2248		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2249			 match.key->ttl);
2250
2251 if (match.mask->ttl &&
2252 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2253 ft_field_support.outer_ipv4_ttl)) {
2254 NL_SET_ERR_MSG_MOD(extack,
2255					   "Matching on TTL is not supported");
2256			return -EOPNOTSUPP;
2257		}
2258
2259 if (match.mask->tos || match.mask->ttl)
2260			*match_level = MLX5_MATCH_L3;
2261	}
2262
2263	/* ***  L3 attributes parsing up to here *** */
2264
2265 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2266 struct flow_match_ports match;
2268		flow_rule_match_ports(rule, &match);
2269		switch (ip_proto) {
2270		case IPPROTO_TCP:
2271			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2272				 tcp_sport, ntohs(match.mask->src));
2273			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2274				 tcp_sport, ntohs(match.key->src));
2275
2276			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2277				 tcp_dport, ntohs(match.mask->dst));
2278			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2279				 tcp_dport, ntohs(match.key->dst));
2280			break;
2281
2282		case IPPROTO_UDP:
2283			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2284				 udp_sport, ntohs(match.mask->src));
2285			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2286				 udp_sport, ntohs(match.key->src));
2287
2288			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2289				 udp_dport, ntohs(match.mask->dst));
2290			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2291				 udp_dport, ntohs(match.key->dst));
2292			break;
2293		default:
2294			NL_SET_ERR_MSG_MOD(extack,
2295					   "Only UDP and TCP transports are supported for L4 matching");
2296			netdev_err(priv->netdev,
2297				   "Only UDP and TCP transports are supported\n");
2298			return -EINVAL;
2299		}
2300
2301 if (match.mask->src || match.mask->dst)
2302			*match_level = MLX5_MATCH_L4;
2303	}
2304
2305 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2306 struct flow_match_tcp match;
2308 flow_rule_match_tcp(rule, &match);
2309 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2310 ntohs(match.mask->flags));
2311 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2312 ntohs(match.key->flags));
2314 if (match.mask->flags)
2315			*match_level = MLX5_MATCH_L4;
2316	}
2317 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2318 struct flow_match_icmp match;
2320		flow_rule_match_icmp(rule, &match);
2321		switch (ip_proto) {
2322		case IPPROTO_ICMP:
2323			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2324			      MLX5_FLEX_PROTO_ICMP))
2325				return -EOPNOTSUPP;
2326			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2327				 match.mask->type);
2328			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2329				 match.key->type);
2330			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2331				 match.mask->code);
2332			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2333				 match.key->code);
2334			break;
2335		case IPPROTO_ICMPV6:
2336			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2337			      MLX5_FLEX_PROTO_ICMPV6))
2338				return -EOPNOTSUPP;
2339			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2340				 match.mask->type);
2341			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2342				 match.key->type);
2343			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2344				 match.mask->code);
2345			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2346				 match.key->code);
2347			break;
2348		default:
2349			NL_SET_ERR_MSG_MOD(extack,
2350					   "Code and type matching only with ICMP and ICMPv6");
2351			netdev_err(priv->netdev,
2352				   "Code and type matching only with ICMP and ICMPv6\n");
2353			return -EINVAL;
2354		}
2355		if (match.mask->code || match.mask->type) {
2356			*match_level = MLX5_MATCH_L4;
2357			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2358		}
2359	}
2360
2361	return 0;
2362 }
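/* parse_cls_flower() is a wrapper around __parse_cls_flower(): after the
 * match is parsed, eswitch flows on non-uplink vports are rejected when the
 * configured min-inline mode is lower than the non-tunnel match level the
 * filter needs, since the NIC would not parse deep enough on the wire.
 */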
2363 static int parse_cls_flower(struct mlx5e_priv *priv,
2364 struct mlx5e_tc_flow *flow,
2365 struct mlx5_flow_spec *spec,
2366 struct flow_cls_offload *f,
2367 struct net_device *filter_dev)
2369 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2370 struct netlink_ext_ack *extack = f->common.extack;
2371 struct mlx5_core_dev *dev = priv->mdev;
2372 struct mlx5_eswitch *esw = dev->priv.eswitch;
2373 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2374 struct mlx5_eswitch_rep *rep;
2375	bool is_eswitch_flow;
2376	int err;
2377
2378 inner_match_level = MLX5_MATCH_NONE;
2379 outer_match_level = MLX5_MATCH_NONE;
2381 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2382 &inner_match_level, &outer_match_level);
2383 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2384 outer_match_level : inner_match_level;
2386 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2387	if (!err && is_eswitch_flow) {
2388		rep = rpriv->rep;
2389 if (rep->vport != MLX5_VPORT_UPLINK &&
2390 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2391 esw->offloads.inline_mode < non_tunnel_match_level)) {
2392 NL_SET_ERR_MSG_MOD(extack,
2393 "Flow is not offloaded due to min inline setting");
2394 netdev_warn(priv->netdev,
2395 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2396				    non_tunnel_match_level, esw->offloads.inline_mode);
2397			return -EOPNOTSUPP;
2398		}
2399	}
2400
2401	flow->attr->inner_match_level = inner_match_level;
2402	flow->attr->outer_match_level = outer_match_level;
2403
2404	return err;
2405 }
2408 struct pedit_headers {
2409	struct ethhdr   eth;
2410	struct vlan_hdr vlan;
2411	struct iphdr    ip4;
2412	struct ipv6hdr  ip6;
2413	struct tcphdr   tcp;
2414	struct udphdr   udp;
2415 };
2416
2417 struct pedit_headers_action {
2418	struct pedit_headers vals;
2419	struct pedit_headers masks;
2420	u32 pedits;
2421 };
2423 static int pedit_header_offsets[] = {
2424 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2425 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2426 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2427 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2428 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2431 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
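/* set_pedit_val() below accumulates one masked 32-bit pedit write into the
 * per-header-type vals/masks accumulators; touching bits that were already
 * written by an earlier pedit key on the same location is rejected.
 */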
2433 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2434 struct pedit_headers_action *hdrs)
2436 u32 *curr_pmask, *curr_pval;
2438 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2439 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2441	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2442		goto out_err;
2443
2444	*curr_pmask |= mask;
2445	*curr_pval  |= (val & mask);
2446
2447	return 0;
2448
2449 out_err:
2450	return -EOPNOTSUPP;
2451 }
2453 struct mlx5_fields {
2454	u8  field;
2455	u8  field_bsize;
2456	u32 field_mask;
2457	u32 offset;
2458	u32 match_offset;
2459 };
2461 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2462 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2463 offsetof(struct pedit_headers, field) + (off), \
2464 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
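/* For example, OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype)
 * binds a 16-bit rewrite of pedit_headers.eth.h_proto to the device field
 * MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, and records the byte offset of the
 * "ethertype" match field so identical match/rewrite values can be elided.
 */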
2466 /* masked values are the same and there are no rewrites that do not have a
2467  * match.
2468  */
2469 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2470 type matchmaskx = *(type *)(matchmaskp); \
2471 type matchvalx = *(type *)(matchvalp); \
2472 type maskx = *(type *)(maskp); \
2473 type valx = *(type *)(valp); \
2475	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2476								 matchmaskx)); \
2477 })
2479 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2480			 void *matchmaskp, u8 bsize)
2481 {
2482	bool same = false;
2483
2484	switch (bsize) {
2485	case 8:
2486		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2487		break;
2488	case 16:
2489		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2490		break;
2491	case 32:
2492		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2493		break;
2494	}
2495
2496	return same;
2497 }
2499 static struct mlx5_fields fields[] = {
2500 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2501 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2502 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2503 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2504 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2505 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2507 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2508 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2509 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2510 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2512 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2513 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2514 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2515 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2516 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2517 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2518 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2519 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2520 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2521 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2522 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2523 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2524 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2525 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2526 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2527 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2528 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2529 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
2531 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2532 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2533	/* in the linux tcphdr, tcp_flags is 8 bits long */
2534 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2536 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2537	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2538 };
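/* mask_to_le() converts a big-endian rewrite mask into little-endian bit
 * order, so the find_first_bit()/find_last_bit() calls in
 * offload_pedit_fields() compute offset/length as the device expects them.
 */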
2540 static unsigned long mask_to_le(unsigned long mask, int size)
2541 {
2542	__be32 mask_be32;
2543	__be16 mask_be16;
2544
2545	if (size == 32) {
2546		mask_be32 = (__force __be32)(mask);
2547		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2548	} else if (size == 16) {
2549		mask_be32 = (__force __be32)(mask);
2550		mask_be16 = *(__be16 *)&mask_be32;
2551		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2552	}
2553
2554	return mask;
2555 }
2556 static int offload_pedit_fields(struct mlx5e_priv *priv,
2557				int namespace,
2558				struct pedit_headers_action *hdrs,
2559				struct mlx5e_tc_flow_parse_attr *parse_attr,
2560				u32 *action_flags,
2561				struct netlink_ext_ack *extack)
2562 {
2563 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2564 int i, action_size, first, last, next_z;
2565 void *headers_c, *headers_v, *action, *vals_p;
2566 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2567 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2568 struct mlx5_fields *f;
2569	unsigned long mask, field_mask;
2570	int err;
2571	u8 cmd;
2572
2573 mod_acts = &parse_attr->mod_hdr_acts;
2574 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2575 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2577 set_masks = &hdrs[0].masks;
2578 add_masks = &hdrs[1].masks;
2579 set_vals = &hdrs[0].vals;
2580 add_vals = &hdrs[1].vals;
2582 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2584 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2585		bool skip;
2586
2587		f = &fields[i];
2588		/* avoid seeing bits set from previous iterations */
2589		s_mask = 0;
2590		a_mask = 0;
2591
2592 s_masks_p = (void *)set_masks + f->offset;
2593 a_masks_p = (void *)add_masks + f->offset;
2595 s_mask = *s_masks_p & f->field_mask;
2596 a_mask = *a_masks_p & f->field_mask;
2598		if (!s_mask && !a_mask) /* nothing to offload here */
2599			continue;
2600
2601 if (s_mask && a_mask) {
2602 NL_SET_ERR_MSG_MOD(extack,
2603 "can't set and add to the same HW field");
2604			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2605			return -EOPNOTSUPP;
2606		}
2607
2608		skip = false;
2609		if (s_mask) {
2610			void *match_mask = headers_c + f->match_offset;
2611			void *match_val = headers_v + f->match_offset;
2612
2613			cmd  = MLX5_ACTION_TYPE_SET;
2614			mask = s_mask;
2615			vals_p = (void *)set_vals + f->offset;
2616			/* don't rewrite if we have a match on the same value */
2617			if (cmp_val_mask(vals_p, s_masks_p, match_val,
2618					 match_mask, f->field_bsize))
2619				skip = true;
2620			/* clear to denote we consumed this field */
2621			*s_masks_p &= ~f->field_mask;
2622		} else {
2623			cmd  = MLX5_ACTION_TYPE_ADD;
2624			mask = a_mask;
2625			vals_p = (void *)add_vals + f->offset;
2626			/* add 0 is no change */
2627			if ((*(u32 *)vals_p & f->field_mask) == 0)
2628				skip = true;
2629			/* clear to denote we consumed this field */
2630			*a_masks_p &= ~f->field_mask;
2631		}
2632		if (skip)
2633			continue;
2634
2635 mask = mask_to_le(mask, f->field_bsize);
2637 first = find_first_bit(&mask, f->field_bsize);
2638 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2639 last = find_last_bit(&mask, f->field_bsize);
2640 if (first < next_z && next_z < last) {
2641 NL_SET_ERR_MSG_MOD(extack,
2642 "rewrite of few sub-fields isn't supported");
2643			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2644			       mask);
2645			return -EOPNOTSUPP;
2646		}
2647
2648		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2649		if (err) {
2650			NL_SET_ERR_MSG_MOD(extack,
2651					   "too many pedit actions, can't offload");
2652			mlx5_core_warn(priv->mdev,
2653				       "mlx5: parsed %d pedit actions, can't do more\n",
2654				       mod_acts->num_actions);
2655			return err;
2656		}
2657
2658 action = mod_acts->actions +
2659 (mod_acts->num_actions * action_size);
2660 MLX5_SET(set_action_in, action, action_type, cmd);
2661 MLX5_SET(set_action_in, action, field, f->field);
2663		if (cmd == MLX5_ACTION_TYPE_SET) {
2664			int start;
2665
2666 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2668 /* if field is bit sized it can start not from first bit */
2669 start = find_first_bit(&field_mask, f->field_bsize);
2671 MLX5_SET(set_action_in, action, offset, first - start);
2672 /* length is num of bits to be written, zero means length of 32 */
2673			MLX5_SET(set_action_in, action, length, (last - first + 1));
2674		}
2675
2676 if (f->field_bsize == 32)
2677 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2678 else if (f->field_bsize == 16)
2679 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2680 else if (f->field_bsize == 8)
2681 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2683		++mod_acts->num_actions;
2684	}
2685
2686	return 0;
2687 }
2689 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2692 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2693 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2694 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2695		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2696 }
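/* alloc_mod_hdr_actions() grows the modify-header array on demand: starting
 * from a single action it doubles max_actions on each reallocation, capped
 * by the firmware's max_modify_header_actions for the given namespace.
 */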
2698 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2699			  int namespace,
2700			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2701 {
2702 int action_size, new_num_actions, max_hw_actions;
2703	size_t new_sz, old_sz;
2704	void *ret;
2705
2706	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2707		return 0;
2708
2709 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2711	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2712								namespace);
2713 new_num_actions = min(max_hw_actions,
2714 mod_hdr_acts->actions ?
2715 mod_hdr_acts->max_actions * 2 : 1);
2716	if (mod_hdr_acts->max_actions == new_num_actions)
2717		return -ENOSPC;
2718
2719 new_sz = action_size * new_num_actions;
2720 old_sz = mod_hdr_acts->max_actions * action_size;
2721	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2722	if (!ret)
2723		return -ENOMEM;
2724
2725 memset(ret + old_sz, 0, new_sz - old_sz);
2726 mod_hdr_acts->actions = ret;
2727	mod_hdr_acts->max_actions = new_num_actions;
2728
2729	return 0;
2730 }
2732 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2734 kfree(mod_hdr_acts->actions);
2735 mod_hdr_acts->actions = NULL;
2736 mod_hdr_acts->num_actions = 0;
2737 mod_hdr_acts->max_actions = 0;
2740 static const struct pedit_headers zero_masks = {};
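/* parse_pedit_to_modify_hdr() folds one FLOW_ACTION_MANGLE/FLOW_ACTION_ADD
 * entry into the accumulators: hdrs[0] collects "set" writes and hdrs[1]
 * collects "add" writes, indexed by the cmd derived from act->id.
 */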
2742 static int
2743 parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2744 const struct flow_action_entry *act, int namespace,
2745 struct mlx5e_tc_flow_parse_attr *parse_attr,
2746 struct pedit_headers_action *hdrs,
2747 struct netlink_ext_ack *extack)
2749 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2750 int err = -EOPNOTSUPP;
2751	u32 mask, val, offset;
2752	u8 htype;
2753
2754 htype = act->mangle.htype;
2755 err = -EOPNOTSUPP; /* can't be all optimistic */
2757 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2758		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2759		goto out_err;
2760	}
2761
2762 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2763 NL_SET_ERR_MSG_MOD(extack,
2764				   "The pedit offload action is not supported");
2765		goto out_err;
2766	}
2767
2768 mask = act->mangle.mask;
2769 val = act->mangle.val;
2770 offset = act->mangle.offset;
2772	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2773	if (err)
2774		goto out_err;
2775
2776	hdrs[cmd].pedits++;
2777
2778	return 0;
2779 out_err:
2780	return err;
2781 }
2783 static int
2784 parse_pedit_to_reformat(struct mlx5e_priv *priv,
2785 const struct flow_action_entry *act,
2786 struct mlx5e_tc_flow_parse_attr *parse_attr,
2787			struct netlink_ext_ack *extack)
2788 {
2789	u32 mask, val, offset;
2790	u32 *p;
2791
2792	if (act->id != FLOW_ACTION_MANGLE)
2793		return -EOPNOTSUPP;
2794
2795 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
2796		NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
2797		return -EOPNOTSUPP;
2798	}
2799
2800 mask = ~act->mangle.mask;
2801 val = act->mangle.val;
2802 offset = act->mangle.offset;
2803 p = (u32 *)&parse_attr->eth;
2804	*(p + (offset >> 2)) |= (val & mask);
2805
2806	return 0;
2807 }
2809 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2810 const struct flow_action_entry *act, int namespace,
2811 struct mlx5e_tc_flow_parse_attr *parse_attr,
2812 struct pedit_headers_action *hdrs,
2813 struct mlx5e_tc_flow *flow,
2814 struct netlink_ext_ack *extack)
2816 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
2817 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
2819 return parse_pedit_to_modify_hdr(priv, act, namespace,
2820					 parse_attr, hdrs, extack);
2821 }
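/* alloc_tc_pedit_action() offloads all accumulated pedit fields and then
 * checks that both cmd masks are fully consumed; any leftover bits denote a
 * field the rewrite logic above does not know how to offload.
 */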
2823 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2824 struct mlx5e_tc_flow_parse_attr *parse_attr,
2825 struct pedit_headers_action *hdrs,
2826				 u32 *action_flags,
2827				 struct netlink_ext_ack *extack)
2828 {
2829	struct pedit_headers *cmd_masks;
2830	int err;
2831	u8 cmd;
2832
2833 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2834				   action_flags, extack);
2835	if (err < 0)
2836		goto out_dealloc_parsed_actions;
2837
2838 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2839 cmd_masks = &hdrs[cmd].masks;
2840 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2841 NL_SET_ERR_MSG_MOD(extack,
2842 "attempt to offload an unsupported field");
2843 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2844 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2845				       16, 1, cmd_masks, sizeof(zero_masks), true);
2846			err = -EOPNOTSUPP;
2847			goto out_dealloc_parsed_actions;
2848		}
2849	}
2850
2851	return 0;
2852
2853 out_dealloc_parsed_actions:
2854	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2855	return err;
2856 }
2858 static bool csum_offload_supported(struct mlx5e_priv *priv,
2859				   u32 action,
2860				   u32 update_flags,
2861				   struct netlink_ext_ack *extack)
2862 {
2863 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2864 TCA_CSUM_UPDATE_FLAG_UDP;
2866 /* The HW recalcs checksums only if re-writing headers */
2867 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2868 NL_SET_ERR_MSG_MOD(extack,
2869 "TC csum action is only offloaded with pedit");
2870 netdev_warn(priv->netdev,
2871			    "TC csum action is only offloaded with pedit\n");
2872		return false;
2873	}
2874
2875 if (update_flags & ~prot_flags) {
2876 NL_SET_ERR_MSG_MOD(extack,
2877 "can't offload TC csum action for some header/s");
2878 netdev_warn(priv->netdev,
2879			    "can't offload TC csum action for some header/s - flags %#x\n",
2880			    update_flags);
2881		return false;
2882	}
2883
2884	return true;
2885 }
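/* The two structs below overlay the 4-byte header words that hold the IPv4
 * ttl and the IPv6 hop_limit, letting the code check whether a pedit mask
 * touches anything in the word besides the TTL/hop-limit byte itself.
 */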
2887 struct ip_ttl_word {
2888	__u8	ttl;
2889	__u8	protocol;
2890	__sum16	check;
2891 };
2892
2893 struct ipv6_hoplimit_word {
2894	__be16	payload_len;
2895	__u8	nexthdr;
2896	__u8	hop_limit;
2897 };
2899 static int is_action_keys_supported(const struct flow_action_entry *act,
2900 bool ct_flow, bool *modify_ip_header,
2901				    bool *modify_tuple,
2902				    struct netlink_ext_ack *extack)
2903 {
2904	u32 mask, offset;
2905	u8 htype;
2906
2907 htype = act->mangle.htype;
2908 offset = act->mangle.offset;
2909 mask = ~act->mangle.mask;
2910 /* For IPv4 & IPv6 header check 4 byte word,
2911 * to determine that modified fields
2912	 * are NOT ttl & hop_limit only.
2913	 */
2914 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2915 struct ip_ttl_word *ttl_word =
2916 (struct ip_ttl_word *)&mask;
2918		if (offset != offsetof(struct iphdr, ttl) ||
2919		    ttl_word->protocol ||
2920		    ttl_word->check) {
2921			*modify_ip_header = true;
2922		}
2923
2924 if (offset >= offsetof(struct iphdr, saddr))
2925 *modify_tuple = true;
2927 if (ct_flow && *modify_tuple) {
2928 NL_SET_ERR_MSG_MOD(extack,
2929					   "can't offload re-write of ipv4 address with action ct");
2930			return -EOPNOTSUPP;
2931		}
2932 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2933 struct ipv6_hoplimit_word *hoplimit_word =
2934 (struct ipv6_hoplimit_word *)&mask;
2936 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2937 hoplimit_word->payload_len ||
2938		    hoplimit_word->nexthdr) {
2939			*modify_ip_header = true;
2940		}
2941
2942 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
2943 *modify_tuple = true;
2945 if (ct_flow && *modify_tuple) {
2946 NL_SET_ERR_MSG_MOD(extack,
2947					   "can't offload re-write of ipv6 address with action ct");
2948			return -EOPNOTSUPP;
2949		}
2950 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
2951 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
2952		*modify_tuple = true;
2953		if (ct_flow) {
2954 NL_SET_ERR_MSG_MOD(extack,
2955					   "can't offload re-write of transport header ports with action ct");
2956			return -EOPNOTSUPP;
2957		}
2958	}
2959
2960	return 0;
2961 }
2963 static bool modify_header_match_supported(struct mlx5e_priv *priv,
2964 struct mlx5_flow_spec *spec,
2965 struct flow_action *flow_action,
2966 u32 actions, bool ct_flow,
2967					  bool ct_clear,
2968					  struct netlink_ext_ack *extack)
2969 {
2970 const struct flow_action_entry *act;
2971	bool modify_ip_header, modify_tuple;
2972	void *headers_c;
2973	void *headers_v;
2974	u16 ethertype;
2975	u8 ip_proto;
2976	int i, err;
2977
2978 headers_c = get_match_headers_criteria(actions, spec);
2979 headers_v = get_match_headers_value(actions, spec);
2980 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2982 /* for non-IP we only re-write MACs, so we're okay */
2983 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
2984	    ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2985		goto out_ok;
2986
2987 modify_ip_header = false;
2988 modify_tuple = false;
2989 flow_action_for_each(i, act, flow_action) {
2990 if (act->id != FLOW_ACTION_MANGLE &&
2991		    act->id != FLOW_ACTION_ADD)
2992			continue;
2993
2994		err = is_action_keys_supported(act, ct_flow,
2995					       &modify_ip_header,
2996					       &modify_tuple, extack);
2997		if (err)
2998			return err;
2999	}
3000
3001 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3002 * (or after clear action), as otherwise, since the tuple is changed,
3003	 * we can't restore ct state
3004	 */
3005 if (!ct_clear && modify_tuple &&
3006 mlx5_tc_ct_add_no_trk_match(spec)) {
3007 NL_SET_ERR_MSG_MOD(extack,
3008 "can't offload tuple modify header with ct matches");
3009		netdev_info(priv->netdev,
3010			    "can't offload tuple modify header with ct matches\n");
3011		return false;
3012	}
3013
3014 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3015 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3016 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3017 NL_SET_ERR_MSG_MOD(extack,
3018 "can't offload re-write of non TCP/UDP");
3019		netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3020			    ip_proto);
3021		return false;
3022	}
3023
3024 out_ok:
3025	return true;
3026 }
3028 static bool actions_match_supported(struct mlx5e_priv *priv,
3029 struct flow_action *flow_action,
3030 struct mlx5e_tc_flow_parse_attr *parse_attr,
3031 struct mlx5e_tc_flow *flow,
3032				    struct netlink_ext_ack *extack)
3033 {
3034	bool ct_flow = false, ct_clear = false;
3035	u32 actions;
3036
3037	ct_clear = flow->attr->ct_attr.ct_action &
3038		   TCA_CT_ACT_CLEAR;
3039	ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3040	actions = flow->attr->action;
3041
3042 if (mlx5e_is_eswitch_flow(flow)) {
3043 if (flow->attr->esw_attr->split_count && ct_flow &&
3044 !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
3045			/* All registers used by ct are cleared when using
3046			 * split rules.
3047			 */
3048			NL_SET_ERR_MSG_MOD(extack,
3049					   "Can't offload mirroring with action ct");
3050			return false;
3051		}
3052	}
3053
3054 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3055 return modify_header_match_supported(priv, &parse_attr->spec,
3056						     flow_action, actions,
3057						     ct_flow, ct_clear, extack);
3058
3059	return true;
3060 }
3063 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3065 return priv->mdev == peer_priv->mdev;
3068 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3069 {
3070	struct mlx5_core_dev *fmdev, *pmdev;
3071	u64 fsystem_guid, psystem_guid;
3072
3073	fmdev = priv->mdev;
3074 pmdev = peer_priv->mdev;
3076 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3077 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3079	return (fsystem_guid == psystem_guid);
3080 }
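/* add_vlan_rewrite_action() implements "vlan modify" as a pedit of the
 * 16-bit h_vlan_TCI word: the flow must already match on cvlan_tag, only the
 * VID bits may change, and the matched VLAN priority has to equal the
 * requested priority since the rewrite below covers the VID only.
 */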
3082 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3083 const struct flow_action_entry *act,
3084 struct mlx5e_tc_flow_parse_attr *parse_attr,
3085 struct pedit_headers_action *hdrs,
3086 u32 *action, struct netlink_ext_ack *extack)
3088 u16 mask16 = VLAN_VID_MASK;
3089 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3090 const struct flow_action_entry pedit_act = {
3091 .id = FLOW_ACTION_MANGLE,
3092 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3093 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3094 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3095		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3096	};
3097 u8 match_prio_mask, match_prio_val;
3098	void *headers_c, *headers_v;
3099	int err;
3100
3101 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3102 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3104 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3105 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3106 NL_SET_ERR_MSG_MOD(extack,
3107				   "VLAN rewrite action must have VLAN protocol match");
3108		return -EOPNOTSUPP;
3109	}
3110
3111 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3112 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3113 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3114 NL_SET_ERR_MSG_MOD(extack,
3115				   "Changing VLAN prio is not supported");
3116		return -EOPNOTSUPP;
3117	}
3118
3119 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3120	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3121
3122	return err;
3123 }
3126 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3127 struct mlx5e_tc_flow_parse_attr *parse_attr,
3128 struct pedit_headers_action *hdrs,
3129 u32 *action, struct netlink_ext_ack *extack)
3131	const struct flow_action_entry prio_tag_act = {
3132		.vlan.vid = 0,
3133		.vlan.prio =
3134			MLX5_GET(fte_match_set_lyr_2_4,
3135				 get_match_headers_value(*action,
3136							 &parse_attr->spec),
3137				 first_prio) &
3138			MLX5_GET(fte_match_set_lyr_2_4,
3139				 get_match_headers_criteria(*action,
3140							    &parse_attr->spec),
3141				 first_prio),
3142	};
3143
3144	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3145				       &prio_tag_act, parse_attr, hdrs, action,
3146				       extack);
3147 }
3149 static int validate_goto_chain(struct mlx5e_priv *priv,
3150 struct mlx5e_tc_flow *flow,
3151 const struct flow_action_entry *act,
3152			       u32 actions,
3153			       struct netlink_ext_ack *extack)
3154 {
3155 bool is_esw = mlx5e_is_eswitch_flow(flow);
3156 struct mlx5_flow_attr *attr = flow->attr;
3157 bool ft_flow = mlx5e_is_ft_flow(flow);
3158 u32 dest_chain = act->chain_index;
3159 struct mlx5_fs_chains *chains;
3160 struct mlx5_eswitch *esw;
3161	u32 reformat_and_fwd;
3162	u32 max_chain;
3163
3164 esw = priv->mdev->priv.eswitch;
3165 chains = is_esw ? esw_chains(esw) : nic_chains(priv);
3166 max_chain = mlx5_chains_get_chain_range(chains);
3167 reformat_and_fwd = is_esw ?
3168 MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
3169		MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
3170
3171	if (ft_flow) {
3172		NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3173		return -EOPNOTSUPP;
3174	}
3175
3176 if (!mlx5_chains_backwards_supported(chains) &&
3177 dest_chain <= attr->chain) {
3178 NL_SET_ERR_MSG_MOD(extack,
3179				   "Goto lower numbered chain isn't supported");
3180		return -EOPNOTSUPP;
3181	}
3182
3183 if (dest_chain > max_chain) {
3184 NL_SET_ERR_MSG_MOD(extack,
3185				   "Requested destination chain is out of supported range");
3186		return -EOPNOTSUPP;
3187	}
3188
3189 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3190 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3191 !reformat_and_fwd) {
3192 NL_SET_ERR_MSG_MOD(extack,
3193				   "Goto chain is not allowed if action has reformat or decap");
3194		return -EOPNOTSUPP;
3195	}
3196
3197	return 0;
3198 }
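/* parse_tc_nic_actions() walks the flower actions for NIC (non-eswitch)
 * offload, translating each entry into MLX5_FLOW_CONTEXT_ACTION_* bits and
 * pedit accumulators, then validates the combination via
 * actions_match_supported().
 */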
3200 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3201 struct flow_action *flow_action,
3202 struct mlx5e_tc_flow_parse_attr *parse_attr,
3203 struct mlx5e_tc_flow *flow,
3204 struct netlink_ext_ack *extack)
3206 struct mlx5_flow_attr *attr = flow->attr;
3207 struct pedit_headers_action hdrs[2] = {};
3208 const struct flow_action_entry *act;
3209	struct mlx5_nic_flow_attr *nic_attr;
3210	u32 action = 0;
3211	int err, i;
3212
3213	if (!flow_action_has_entries(flow_action))
3214		return -EINVAL;
3215
3216	if (!flow_action_hw_stats_check(flow_action, extack,
3217					FLOW_ACTION_HW_STATS_DELAYED_BIT))
3218		return -EOPNOTSUPP;
3219
3220 nic_attr = attr->nic_attr;
3222 nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3224	flow_action_for_each(i, act, flow_action) {
3225		switch (act->id) {
3226 case FLOW_ACTION_ACCEPT:
3227 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3228				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3229			break;
3230 case FLOW_ACTION_DROP:
3231 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3232 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3233 flow_table_properties_nic_receive.flow_counter))
3234				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3235			break;
3236 case FLOW_ACTION_MANGLE:
3237 case FLOW_ACTION_ADD:
3238 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3239						    parse_attr, hdrs, NULL, extack);
3240			if (err)
3241				return err;
3242
3243			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3244			break;
3245 case FLOW_ACTION_VLAN_MANGLE:
3246 err = add_vlan_rewrite_action(priv,
3247 MLX5_FLOW_NAMESPACE_KERNEL,
3248						       act, parse_attr, hdrs,
3249						       &action, extack);
3250			if (err)
3251				return err;
3252
3253			break;
3254		case FLOW_ACTION_CSUM:
3255			if (csum_offload_supported(priv, action,
3256						   act->csum_flags,
3257						   extack))
3258				break;
3259
3260			return -EOPNOTSUPP;
3261 case FLOW_ACTION_REDIRECT: {
3262 struct net_device *peer_dev = act->dev;
3264 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3265 same_hw_devs(priv, netdev_priv(peer_dev))) {
3266 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3267 flow_flag_set(flow, HAIRPIN);
3268 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3269						  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3270			} else {
3271 NL_SET_ERR_MSG_MOD(extack,
3272 "device is not on same HW, can't offload");
3273				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3274					    peer_dev->name);
3275				return -EINVAL;
3276			}
3277			}
3278			break;
3279 case FLOW_ACTION_MARK: {
3280 u32 mark = act->mark;
3282 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3283 NL_SET_ERR_MSG_MOD(extack,
3284						   "Bad flow mark - only 16 bit is supported");
3285				return -EINVAL;
3286			}
3287
3288			nic_attr->flow_tag = mark;
3289			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3290			}
3291			break;
3292 case FLOW_ACTION_GOTO:
3293			err = validate_goto_chain(priv, flow, act, action,
3294						  extack);
3295			if (err)
3296				return err;
3297
3298 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3299			attr->dest_chain = act->chain_index;
3300			break;
3301 case FLOW_ACTION_CT:
3302			err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3303			if (err)
3304				return err;
3305
3306			flow_flag_set(flow, CT);
3307			break;
3308		default:
3309			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3310			return -EOPNOTSUPP;
3311		}
3312	}
3313
3314 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3315 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3316 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3317					    parse_attr, hdrs, &action, extack);
3318		if (err)
3319			return err;
3320		/* in case all pedit actions are skipped, remove the MOD_HDR
3321		 * flag.
3322		 */
3323 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3324 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3325			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3326		}
3327	}
3328
3329 attr->action = action;
3331 if (attr->dest_chain) {
3332 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3333			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3334			return -EOPNOTSUPP;
3335		}
3336		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3337	}
3338
3339 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3340 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3342	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3343		return -EOPNOTSUPP;
3344
3345	return 0;
3346 }
3348 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3349 struct net_device *peer_netdev)
3351 struct mlx5e_priv *peer_priv;
3353 peer_priv = netdev_priv(peer_netdev);
3355 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3356 mlx5e_eswitch_vf_rep(priv->netdev) &&
3357 mlx5e_eswitch_vf_rep(peer_netdev) &&
3358 same_hw_devs(priv, peer_priv));
3361 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3362 const struct flow_action_entry *act,
3363 struct mlx5_esw_flow_attr *attr,
3364				u32 *action)
3365 {
3366	u8 vlan_idx = attr->total_vlan;
3367
3368	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3369		return -EOPNOTSUPP;
3370
3371	switch (act->id) {
3372	case FLOW_ACTION_VLAN_POP:
3373		if (vlan_idx) {
3374 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3375								 MLX5_FS_VLAN_DEPTH))
3376				return -EOPNOTSUPP;
3377
3378			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3379		} else {
3380			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3381		}
3382		break;
3383 case FLOW_ACTION_VLAN_PUSH:
3384 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3385 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3386 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3387 if (!attr->vlan_proto[vlan_idx])
3388			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3389
3390		if (vlan_idx) {
3391			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3392								 MLX5_FS_VLAN_DEPTH))
3393				return -EOPNOTSUPP;
3394
3395			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3396		} else {
3397 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3398			    (act->vlan.proto != htons(ETH_P_8021Q) ||
3399			     act->vlan.prio))
3400				return -EOPNOTSUPP;
3401
3402			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3403		}
3404		break;
3405	default:
3406		return -EINVAL;
3407	}
3408
3409	attr->total_vlan = vlan_idx + 1;
3410
3411	return 0;
3412 }
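/* get_fdb_out_dev() maps a mirred target onto the rep netdev that the FDB
 * rule should forward to: a LAG master stacked on the uplink resolves to the
 * uplink itself, and a bond device resolves to its active slave, provided
 * the result is a rep with the same parent id as the uplink.
 */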
3414 static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3415 struct net_device *out_dev)
3417 struct net_device *fdb_out_dev = out_dev;
3418	struct net_device *uplink_upper;
3419
3420	rcu_read_lock();
3421 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3422 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3423 uplink_upper == out_dev) {
3424 fdb_out_dev = uplink_dev;
3425 } else if (netif_is_lag_master(out_dev)) {
3426		fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3427		if (fdb_out_dev &&
3428		    (!mlx5e_eswitch_rep(fdb_out_dev) ||
3429		     !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3430			fdb_out_dev = NULL;
3431	}
3432	rcu_read_unlock();
3433
3434	return fdb_out_dev;
3435 }
3436 static int add_vlan_push_action(struct mlx5e_priv *priv,
3437 struct mlx5_flow_attr *attr,
3438				struct net_device **out_dev,
3439				u32 *action)
3440 {
3441 struct net_device *vlan_dev = *out_dev;
3442 struct flow_action_entry vlan_act = {
3443 .id = FLOW_ACTION_VLAN_PUSH,
3444 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3445		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3446		.vlan.prio = 0,
3447	};
3448	int err;
3449
3450	err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3451	if (err)
3452		return err;
3453
3454 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3455 dev_get_iflink(vlan_dev));
3456 if (is_vlan_dev(*out_dev))
3457		err = add_vlan_push_action(priv, attr, out_dev, action);
3458
3459	return err;
3460 }
3462 static int add_vlan_pop_action(struct mlx5e_priv *priv,
3463				struct mlx5_flow_attr *attr,
3464				u32 *action)
3465 {
3466	struct flow_action_entry vlan_act = {
3467		.id = FLOW_ACTION_VLAN_POP,
3468	};
3469 int nest_level, err = 0;
3471 nest_level = attr->parse_attr->filter_dev->lower_level -
3472 priv->netdev->lower_level;
3473 while (nest_level--) {
3474		err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3475		if (err)
3476			break;
3477	}
3478
3479	return err;
3480 }
3482 static bool same_hw_reps(struct mlx5e_priv *priv,
3483 struct net_device *peer_netdev)
3485 struct mlx5e_priv *peer_priv;
3487 peer_priv = netdev_priv(peer_netdev);
3489 return mlx5e_eswitch_rep(priv->netdev) &&
3490 mlx5e_eswitch_rep(peer_netdev) &&
3491 same_hw_devs(priv, peer_priv);
3494 static bool is_lag_dev(struct mlx5e_priv *priv,
3495 struct net_device *peer_netdev)
3497 return ((mlx5_lag_is_sriov(priv->mdev) ||
3498 mlx5_lag_is_multipath(priv->mdev)) &&
3499 same_hw_reps(priv, peer_netdev));
3502 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3503 struct net_device *out_dev)
3505	if (is_merged_eswitch_vfs(priv, out_dev))
3506		return true;
3507
3508	if (is_lag_dev(priv, out_dev))
3509		return true;
3510
3511 return mlx5e_eswitch_rep(out_dev) &&
3512 same_port_devs(priv, netdev_priv(out_dev));
3515 static bool is_duplicated_output_device(struct net_device *dev,
3516 struct net_device *out_dev,
3517 int *ifindexes, int if_count,
3518					struct netlink_ext_ack *extack)
3519 {
3520	int i;
3521
3522 for (i = 0; i < if_count; i++) {
3523 if (ifindexes[i] == out_dev->ifindex) {
3524 NL_SET_ERR_MSG_MOD(extack,
3525 "can't duplicate output to same device");
3526			netdev_err(dev, "can't duplicate output to same device: %s\n",
3527				   out_dev->name);
3528			return true;
3529		}
3530	}
3531
3532	return false;
3533 }
3535 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3536 struct mlx5e_tc_flow *flow,
3537 struct net_device *out_dev,
3538 struct netlink_ext_ack *extack)
3540 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3541 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3542 struct mlx5e_rep_priv *rep_priv;
3544 /* Forwarding non encapsulated traffic between
3545 * uplink ports is allowed only if
3546 * termination_table_raw_traffic cap is set.
3548	 * Input vport was stored in attr->in_rep.
3549 * In LAG case, *priv* is the private data of
3550 * uplink which may be not the input vport.
3552 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3554 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3555	      mlx5e_eswitch_uplink_rep(out_dev)))
3556		return 0;
3557
3558 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3559 termination_table_raw_traffic)) {
3560 NL_SET_ERR_MSG_MOD(extack,
3561 "devices are both uplink, can't offload forwarding");
3562 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3563		       priv->netdev->name, out_dev->name);
3564		return -EOPNOTSUPP;
3565 } else if (out_dev != rep_priv->netdev) {
3566 NL_SET_ERR_MSG_MOD(extack,
3567 "devices are not the same uplink, can't offload forwarding");
3568 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3569		       priv->netdev->name, out_dev->name);
3570		return -EOPNOTSUPP;
3571	}
3572
3573	return 0;
3574 }
3575 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3576 struct flow_action *flow_action,
3577 struct mlx5e_tc_flow *flow,
3578 struct netlink_ext_ack *extack,
3579 struct net_device *filter_dev)
3581 struct pedit_headers_action hdrs[2] = {};
3582 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3583 struct mlx5e_tc_flow_parse_attr *parse_attr;
3584 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3585 const struct ip_tunnel_info *info = NULL;
3586 struct mlx5_flow_attr *attr = flow->attr;
3587 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3588 bool ft_flow = mlx5e_is_ft_flow(flow);
3589 const struct flow_action_entry *act;
3590 struct mlx5_esw_flow_attr *esw_attr;
3591 bool encap = false, decap = false;
3592 u32 action = attr->action;
3593 int err, i, if_count = 0;
3594 bool mpls_push = false;
3596	if (!flow_action_has_entries(flow_action))
3597		return -EINVAL;
3598
3599	if (!flow_action_hw_stats_check(flow_action, extack,
3600					FLOW_ACTION_HW_STATS_DELAYED_BIT))
3601		return -EOPNOTSUPP;
3602
3603 esw_attr = attr->esw_attr;
3604 parse_attr = attr->parse_attr;
3606	flow_action_for_each(i, act, flow_action) {
3607		switch (act->id) {
3608 case FLOW_ACTION_DROP:
3609 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3610				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3611			break;
3612 case FLOW_ACTION_TRAP:
3613 if (!flow_offload_has_one_action(flow_action)) {
3614 NL_SET_ERR_MSG_MOD(extack,
3615 "action trap is supported as a sole action only");
3618 action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3619 MLX5_FLOW_CONTEXT_ACTION_COUNT);
3620			attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
3621			break;
3622 case FLOW_ACTION_MPLS_PUSH:
3623 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3624 reformat_l2_to_l3_tunnel) ||
3625 act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
3626 NL_SET_ERR_MSG_MOD(extack,
3627						   "mpls push is supported only for mpls_uc protocol");
3628				return -EOPNOTSUPP;
3629			}
3630			mpls_push = true;
3631			break;
3632 case FLOW_ACTION_MPLS_POP:
3633 /* we only support mpls pop if it is the first action
3634 * and the filter net device is bareudp. Subsequent
3635			 * actions can be pedit and the last can be mirred
3636			 * egress redirect.
3637			 */
3638			if (i) {
3639				NL_SET_ERR_MSG_MOD(extack,
3640						   "mpls pop supported only as first action");
3641				return -EOPNOTSUPP;
3642			}
3643 if (!netif_is_bareudp(filter_dev)) {
3644 NL_SET_ERR_MSG_MOD(extack,
3645						   "mpls pop supported only on bareudp devices");
3646				return -EOPNOTSUPP;
3647			}
3648
3649 parse_attr->eth.h_proto = act->mpls_pop.proto;
3650 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
3651			flow_flag_set(flow, L3_TO_L2_DECAP);
3652			break;
3653 case FLOW_ACTION_MANGLE:
3654 case FLOW_ACTION_ADD:
3655 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3656						    parse_attr, hdrs, flow, extack);
3657			if (err)
3658				return err;
3659
3660 if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
3661 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3662				esw_attr->split_count = esw_attr->out_count;
3663			}
3664			break;
3665 case FLOW_ACTION_CSUM:
3666 if (csum_offload_supported(priv, action,
3667						   act->csum_flags, extack))
3668				break;
3669
3670			return -EOPNOTSUPP;
3671 case FLOW_ACTION_REDIRECT:
3672 case FLOW_ACTION_MIRRED: {
3673 struct mlx5e_priv *out_priv;
3674			struct net_device *out_dev;
3675
3676			out_dev = act->dev;
3677			if (!out_dev) {
3678				/* out_dev is NULL when filters with
3679				 * non-existing mirred device are replayed to
3680				 * the driver.
3681				 */
3682				return -EINVAL;
3683			}
3684
3685 if (mpls_push && !netif_is_bareudp(out_dev)) {
3686 NL_SET_ERR_MSG_MOD(extack,
3687						   "mpls is supported only through a bareudp device");
3688				return -EOPNOTSUPP;
3689			}
3690
3691 if (ft_flow && out_dev == priv->netdev) {
3692 /* Ignore forward to self rules generated
3693 * by adding both mlx5 devs to the flow table
3694			 * block on a normal nft offload setup.
3695			 */
3696				return -EOPNOTSUPP;
3697			}
3698
3699 if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3700 NL_SET_ERR_MSG_MOD(extack,
3701 "can't support more output ports, can't offload forwarding");
3702 netdev_warn(priv->netdev,
3703 "can't support more than %d output ports, can't offload forwarding\n",
3704					    esw_attr->out_count);
3705				return -EOPNOTSUPP;
3706			}
3707
3708 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3709				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
3710			if (encap) {
3711				parse_attr->mirred_ifindex[esw_attr->out_count] =
3712					out_dev->ifindex;
3713 parse_attr->tun_info[esw_attr->out_count] =
3714 mlx5e_dup_tun_info(info);
3715				if (!parse_attr->tun_info[esw_attr->out_count])
3716					return -ENOMEM;
3717				encap = false;
3718 esw_attr->dests[esw_attr->out_count].flags |=
3719 MLX5_ESW_DEST_ENCAP;
3720 esw_attr->out_count++;
3721				/* attr->dests[].rep is resolved when we
3722				 * handle encap
3723				 */
3724 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
3725 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3726 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3728				if (is_duplicated_output_device(priv->netdev,
3729								out_dev,
3730								ifindexes,
3731								if_count,
3732								extack))
3733					return -EOPNOTSUPP;
3734
3735				ifindexes[if_count] = out_dev->ifindex;
3736				if_count++;
3737
3738				out_dev = get_fdb_out_dev(uplink_dev, out_dev);
3739				if (!out_dev)
3740					return -ENODEV;
3741
3742 if (is_vlan_dev(out_dev)) {
3743					err = add_vlan_push_action(priv, attr,
3744								   &out_dev,
3745								   &action);
3746					if (err)
3747						return err;
3748				}
3749
3750 if (is_vlan_dev(parse_attr->filter_dev)) {
3751					err = add_vlan_pop_action(priv, attr,
3752								  &action);
3753					if (err)
3754						return err;
3755				}
3756
3757				err = verify_uplink_forwarding(priv, flow, out_dev, extack);
3758				if (err)
3759					return err;
3760
3761 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3762 NL_SET_ERR_MSG_MOD(extack,
3763						   "devices are not on same switch HW, can't offload forwarding");
3764					return -EOPNOTSUPP;
3765				}
3766
3767 out_priv = netdev_priv(out_dev);
3768 rpriv = out_priv->ppriv;
3769 esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
3770 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
3771 esw_attr->out_count++;
3772 } else if (parse_attr->filter_dev != priv->netdev) {
3773 /* All mlx5 devices are called to configure
3774 * high level device filters. Therefore, the
3775 * *attempt* to install a filter on invalid
3776				 * eswitch should not trigger an explicit error
3777				 */
3778				return -EINVAL;
3779			} else {
3780 NL_SET_ERR_MSG_MOD(extack,
3781 "devices are not on same switch HW, can't offload forwarding");
3782 netdev_warn(priv->netdev,
3783					    "devices %s %s not on same switch HW, can't offload forwarding\n",
3784					    priv->netdev->name,
3785					    out_dev->name);
3786				return -EINVAL;
3787			}
3788			}
3789			break;
3790		case FLOW_ACTION_TUNNEL_ENCAP:
3791			info = act->tunnel;
3792			if (info)
3793				encap = true;
3794			else
3795				return -EOPNOTSUPP;
3796
3797			break;
3798 case FLOW_ACTION_VLAN_PUSH:
3799 case FLOW_ACTION_VLAN_POP:
3800 if (act->id == FLOW_ACTION_VLAN_PUSH &&
3801 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
3802 /* Replace vlan pop+push with vlan modify */
3803 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3804 err = add_vlan_rewrite_action(priv,
3805 MLX5_FLOW_NAMESPACE_FDB,
3806							      act, parse_attr, hdrs,
3807							      &action, extack);
3808			} else {
3809				err = parse_tc_vlan_action(priv, act, esw_attr, &action);
3810			}
3811			if (err)
3812				return err;
3813
3814			esw_attr->split_count = esw_attr->out_count;
3815			break;
3816 case FLOW_ACTION_VLAN_MANGLE:
3817 err = add_vlan_rewrite_action(priv,
3818 MLX5_FLOW_NAMESPACE_FDB,
3819						      act, parse_attr, hdrs,
3820						      &action, extack);
3821			if (err)
3822				return err;
3823
3824			esw_attr->split_count = esw_attr->out_count;
3825			break;
3826		case FLOW_ACTION_TUNNEL_DECAP:
3827			decap = true;
3828			break;
3829 case FLOW_ACTION_GOTO:
3830			err = validate_goto_chain(priv, flow, act, action,
3831						  extack);
3832			if (err)
3833				return err;
3834
3835			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3836			attr->dest_chain = act->chain_index;
3837			break;
3838 case FLOW_ACTION_CT:
3839			err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3840			if (err)
3841				return err;
3842
3843			flow_flag_set(flow, CT);
3844			esw_attr->split_count = esw_attr->out_count;
3845			break;
3846		default:
3847			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3848			return -EOPNOTSUPP;
3849		}
3850	}
3851
3852 /* always set IP version for indirect table handling */
3853 attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
3855 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
3856 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
3857		/* For prio tag mode, replace vlan pop with rewrite vlan prio
3858		 * tag rewrite.
3859		 */
3860		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3861		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
3862						       &action, extack);
3863		if (err)
3864			return err;
3865	}
3866
3867 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3868 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3869 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3870					    parse_attr, hdrs, &action, extack);
3871		if (err)
3872			return err;
3873 /* in case all pedit actions are skipped, remove the MOD_HDR
3874 * flag. we might have set split_count either by pedit or
3875		 * pop/push. if there is no pop/push either, reset it too.
3876		 */
3877 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3878 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3879 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3880 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3881 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3882				esw_attr->split_count = 0;
3883		}
3884	}
3885
3886 attr->action = action;
3887	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3888		return -EOPNOTSUPP;
3889
3890	if (attr->dest_chain) {
3891		if (decap) {
3892 /* It can be supported if we'll create a mapping for
3893 * the tunnel device only (without tunnel), and set
3894 * this tunnel id with this decap flow.
3896			 * On restore (miss), we'll just set this saved tunnel
3897			 * id.
3898			 */
3900 NL_SET_ERR_MSG(extack,
3901 "Decap with goto isn't supported");
3902 netdev_warn(priv->netdev,
3903				    "Decap with goto isn't supported");
3904			return -EOPNOTSUPP;
3905		}
3906
3907		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3908	}
3909
3910 if (!(attr->action &
3911 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3912 NL_SET_ERR_MSG_MOD(extack,
3913				   "Rule must have at least one forward/drop action");
3914		return -EOPNOTSUPP;
3915	}
3916
3917 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3918 NL_SET_ERR_MSG_MOD(extack,
3919 "current firmware doesn't support split rule for port mirroring");
3920		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3921		return -EOPNOTSUPP;
3922	}
3923
3924	return 0;
3925 }
3927 static void get_flags(int flags, unsigned long *flow_flags)
3929 unsigned long __flow_flags = 0;
3931 if (flags & MLX5_TC_FLAG(INGRESS))
3932 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3933 if (flags & MLX5_TC_FLAG(EGRESS))
3934 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
3936 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3937 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3938 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
3939 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3940 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
3941 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
3943 *flow_flags = __flow_flags;
3946 static const struct rhashtable_params tc_ht_params = {
3947 .head_offset = offsetof(struct mlx5e_tc_flow, node),
3948 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
3949 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
3950	.automatic_shrinking = true,
3951 };
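/* Offloaded flows are hashed by their flower cookie. Eswitch flows live in
 * the uplink representor's tc_ht while NIC flows use the netdev-private
 * table; get_tc_ht() below picks the right one from the offload flags.
 */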
3953 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
3954 unsigned long flags)
3956 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3957 struct mlx5e_rep_priv *uplink_rpriv;
3959 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
3960 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3961 return &uplink_rpriv->uplink_priv.tc_ht;
3962 } else /* NIC offload */
3963 return &priv->fs.tc.ht;
3966 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
3968 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3969 struct mlx5_flow_attr *attr = flow->attr;
3970 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
3971 flow_flag_test(flow, INGRESS);
3972 bool act_is_encap = !!(attr->action &
3973 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
3974 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
3975						MLX5_DEVCOM_ESW_OFFLOADS);
3976
3977	if (!esw_paired)
3978		return false;
3979
3980 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
3981 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
3982	    (is_rep_ingress || act_is_encap))
3983		return true;
3984
3985	return false;
3986 }
3988 struct mlx5_flow_attr *
3989 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
3991 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
3992 sizeof(struct mlx5_esw_flow_attr) :
3993 sizeof(struct mlx5_nic_flow_attr);
3994 struct mlx5_flow_attr *attr;
3996 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4000 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4001 struct flow_cls_offload *f, unsigned long flow_flags,
4002 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4003 struct mlx5e_tc_flow **__flow)
4005 struct mlx5e_tc_flow_parse_attr *parse_attr;
4006 struct mlx5_flow_attr *attr;
4007	struct mlx5e_tc_flow *flow;
4008	int out_index;
4009	int err;
4010
4011 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4012 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4013	if (!parse_attr || !flow) {
4014		err = -ENOMEM;
4015		goto err_free;
4016	}
4017	flow->flags = flow_flags;
4018	flow->cookie = f->cookie;
4019	flow->priv = priv;
4020	attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
4021	if (!attr) {
4022		err = -ENOMEM;
4023		goto err_free;
4024	}
4025	flow->attr = attr;
4026 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4027 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4028 INIT_LIST_HEAD(&flow->hairpin);
4029 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4030 refcount_set(&flow->refcnt, 1);
4031	init_completion(&flow->init_done);
4032
4033	*__flow = flow;
4034	*__parse_attr = parse_attr;
4035
4036	return 0;
4037
4038 err_free:
4039	kfree(flow);
4040	kvfree(parse_attr);
4041	return err;
4042 }
4045 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4046 struct mlx5e_tc_flow_parse_attr *parse_attr,
4047 struct flow_cls_offload *f)
4049 attr->parse_attr = parse_attr;
4050 attr->chain = f->common.chain_index;
4051 attr->prio = f->common.prio;
4055 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4056 struct mlx5e_priv *priv,
4057 struct mlx5e_tc_flow_parse_attr *parse_attr,
4058 struct flow_cls_offload *f,
4059 struct mlx5_eswitch_rep *in_rep,
4060 struct mlx5_core_dev *in_mdev)
4062 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4063 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4065 mlx5e_flow_attr_init(attr, parse_attr, f);
4067 esw_attr->in_rep = in_rep;
4068 esw_attr->in_mdev = in_mdev;
4070 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4071 MLX5_COUNTER_SOURCE_ESWITCH)
4072		esw_attr->counter_dev = in_mdev;
4073	else
4074		esw_attr->counter_dev = priv->mdev;
4075 }
4077 static struct mlx5e_tc_flow *
4078 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4079 struct flow_cls_offload *f,
4080 unsigned long flow_flags,
4081 struct net_device *filter_dev,
4082 struct mlx5_eswitch_rep *in_rep,
4083 struct mlx5_core_dev *in_mdev)
4085 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4086 struct netlink_ext_ack *extack = f->common.extack;
4087 struct mlx5e_tc_flow_parse_attr *parse_attr;
4088	struct mlx5e_tc_flow *flow;
4089	int attr_size, err;
4090
4091 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4092 attr_size = sizeof(struct mlx5_esw_flow_attr);
4093 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4094			       &parse_attr, &flow);
4095	if (err)
4096		goto out;
4097
4098 parse_attr->filter_dev = filter_dev;
4099 mlx5e_flow_esw_attr_init(flow->attr,
4100				 priv, parse_attr,
4101				 f, in_rep, in_mdev);
4102
4103	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4104			       f, filter_dev);
4105	if (err)
4106		goto err_free;
4107
4108 /* actions validation depends on parsing the ct matches first */
4109 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4110				   &flow->attr->ct_attr, extack);
4111	if (err)
4112		goto err_free;
4113
4114	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
4115	if (err)
4116		goto err_free;
4117
4118 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4119	complete_all(&flow->init_done);
4120	if (err) {
4121		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4122			goto err_free;
4123
4124		add_unready_flow(flow);
4125	}
4126
4127	return flow;
4128
4129 err_free:
4130	mlx5e_flow_put(priv, flow);
4131 out:
4132	return ERR_PTR(err);
4133 }
4135 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4136 struct mlx5e_tc_flow *flow,
4137 unsigned long flow_flags)
4139 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4140 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4141 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4142 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4143 struct mlx5e_tc_flow_parse_attr *parse_attr;
4144 struct mlx5e_rep_priv *peer_urpriv;
4145 struct mlx5e_tc_flow *peer_flow;
4146	struct mlx5_core_dev *in_mdev;
4147	int err = 0;
4148
4149	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4150	if (!peer_esw)
4151		return -ENODEV;
4152
4153 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4154 peer_priv = netdev_priv(peer_urpriv->netdev);
4156	/* in_mdev is assigned the mdev from which the packet originated.
4157 * So packets redirected to uplink use the same mdev of the
4158	 * original flow and packets redirected from uplink use the
4159	 * peer mdev.
4160	 */
4161 if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4162 in_mdev = peer_priv->mdev;
4164 in_mdev = priv->mdev;
4166 parse_attr = flow->attr->parse_attr;
4167 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4168 parse_attr->filter_dev,
4169 attr->in_rep, in_mdev);
4170 if (IS_ERR(peer_flow)) {
4171		err = PTR_ERR(peer_flow);
4172		goto out;
4173	}
4174
4175 flow->peer_flow = peer_flow;
4176 flow_flag_set(flow, DUP);
4177 mutex_lock(&esw->offloads.peer_mutex);
4178 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4179	mutex_unlock(&esw->offloads.peer_mutex);
4180
4181 out:
4182	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4183	return err;
4184 }
4187 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4188 struct flow_cls_offload *f,
4189 unsigned long flow_flags,
4190 struct net_device *filter_dev,
4191 struct mlx5e_tc_flow **__flow)
4193 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4194 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4195 struct mlx5_core_dev *in_mdev = priv->mdev;
4196	struct mlx5e_tc_flow *flow;
4197	int err;
4198
4199	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4200				    in_mdev);
4201	if (IS_ERR(flow))
4202		return PTR_ERR(flow);
4203
4204 if (is_peer_flow_needed(flow)) {
4205 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4207			mlx5e_tc_del_fdb_flow(priv, flow);
4208			return err;
4209		}
4210	}
4211
4212	*__flow = flow;
4213
4214	return 0;
4215 }
4221 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4222 struct flow_cls_offload *f,
4223 unsigned long flow_flags,
4224 struct net_device *filter_dev,
4225 struct mlx5e_tc_flow **__flow)
4227 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4228 struct netlink_ext_ack *extack = f->common.extack;
4229 struct mlx5e_tc_flow_parse_attr *parse_attr;
4230	struct mlx5e_tc_flow *flow;
4231	int attr_size, err;
4232
4233	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4234		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4235			return -EOPNOTSUPP;
4236	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4237		return -EOPNOTSUPP;
4238	}
4239
4240 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4241 attr_size = sizeof(struct mlx5_nic_flow_attr);
4242 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4243			       &parse_attr, &flow);
4244	if (err)
4245		goto out;
4246
4247 parse_attr->filter_dev = filter_dev;
4248 mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4250	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4251			       f, filter_dev);
4252	if (err)
4253		goto err_free;
4254
4255 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4256				   &flow->attr->ct_attr, extack);
4257	if (err)
4258		goto err_free;
4259
4260	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4261	if (err)
4262		goto err_free;
4263
4264	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4265	if (err)
4266		goto err_free;
4267
4268	flow_flag_set(flow, OFFLOADED);
4269	*__flow = flow;
4270
4271	return 0;
4272
4273 err_free:
4274 flow_flag_set(flow, FAILED);
4275 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4276	mlx5e_flow_put(priv, flow);
4277 out:
4278	return err;
4279 }
4282 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4283 struct flow_cls_offload *f,
4284 unsigned long flags,
4285 struct net_device *filter_dev,
4286 struct mlx5e_tc_flow **flow)
4288 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4289	unsigned long flow_flags;
4290	int err;
4291
4292	get_flags(flags, &flow_flags);
4293
4294	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4295		return -EOPNOTSUPP;
4296
4297 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4298		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4299					 filter_dev, flow);
4300	else
4301		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4302					 filter_dev, flow);
4303
4304	return err;
4305 }
4307 static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
4308 struct mlx5e_rep_priv *rpriv)
4310 /* Offloaded flow rule is allowed to duplicate on non-uplink representor
4311 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
4312 * function is called from NIC mode.
4314 return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
4317 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4318 struct flow_cls_offload *f, unsigned long flags)
4320 struct netlink_ext_ack *extack = f->common.extack;
4321 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4322 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4323	struct mlx5e_tc_flow *flow;
4324	int err = 0;
4325
4326	rcu_read_lock();
4327	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4328	if (flow) {
4329		/* Same flow rule offloaded to non-uplink representor sharing tc block,
4330		 * just return 0.
4331		 */
4332		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4333			goto rcu_unlock;
4334
4335 NL_SET_ERR_MSG_MOD(extack,
4336 "flow cookie already exists, ignoring");
4337 netdev_warn_once(priv->netdev,
4338				 "flow cookie %lx already exists, ignoring\n",
4339				 f->cookie);
4340		err = -EEXIST;
4341		goto rcu_unlock;
4342	}
4343 rcu_unlock:
4344	rcu_read_unlock();
4345	if (flow)
4346		goto out;
4347
4348	trace_mlx5e_configure_flower(f);
4349	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4350	if (err)
4351		goto out;
4352
4353 /* Flow rule offloaded to non-uplink representor sharing tc block,
4354 * set the flow's owner dev.
4356 if (is_flow_rule_duplicate_allowed(dev, rpriv))
4357 flow->orig_dev = dev;
4359 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4366 mlx5e_flow_put(priv, flow);
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}

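/* Deletion can race with itself (e.g. two devices sharing a tc block each
 * replaying the same FLOW_CLS_DESTROY) and with stats queries that hold a
 * temporary reference. The DELETED flag below makes sure only one caller
 * unlinks the flow from the hash table and drops the insertion reference.
 */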
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
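	/* E.g. with two PFs in a multipath setup the flow is duplicated (DUP
	 * flag) onto the peer eswitch; the totals reported to tc are the sum
	 * of both hardware counters, with lastuse taken as the more recent
	 * of the two, as computed below.
	 */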
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/sec.
	 * mbit means million bits.
	 * Moreover, if rate is non zero we choose to configure to a minimum of
	 * 1 mbit/sec.
	 */
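	/* Worked examples (input values are illustrative):
	 *   rate =  187500 B/s -> 1500000 b/s, +500000 = 2000000 -> 2 mbit/s
	 *                         (the +500000 bias rounds 1.5 up to 2)
	 *   rate = 1250000 B/s -> 10000000 b/s, +500000 -> quotient 10 mbit/s
	 *   rate =   10000 B/s -> 80000 b/s, +500000 -> 0, clamped to 1 mbit/s
	 */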
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		/* do_div() returns the remainder and updates rate to the
		 * quotient, which is the value we actually want here.
		 */
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
				return -EOPNOTSUPP;
			}
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

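/* Illustrative usage from the tc side (device name and numbers are example
 * values, not taken from this file):
 *
 *   tc qdisc add dev $VF_REP ingress
 *   tc filter add dev $VF_REP ingress prio 1 matchall \
 *           action police rate 100mbit burst 64k conform-exceed drop
 *
 * Only prio 1 is accepted and only a single byte-rate police action is
 * supported; packet-per-second policing is rejected above.
 */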
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

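/* A peer mlx5 device on the same hardware is going away, so every hairpin
 * entry paired with it must be marked dead. Note the two-phase design
 * below: references are taken under hairpin_tbl_lock, but waiting on each
 * entry's res_ready completion happens after the lock is dropped, so a
 * hairpin setup still in flight cannot stall the table for other users.
 */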
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

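	/* Sizing example (capability values are illustrative): with
	 * max_flow_counter = 0x10000 (65536) and log_max_ft_size = 20:
	 *   tc_grp_size = min(65536, BIT(18))     = 65536
	 *   tc_tbl_size = min(65536 * 4, BIT(20)) = 262144
	 */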
	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_chains_attr attr = {};
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
		attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
	}
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = priv->fs.vlan.ft.t;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_chains;
	}

	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5_chains_destroy(tc->chains);
err_chains:
	rhashtable_destroy(&tc->ht);
	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5_chains_destroy(tc->chains);
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	int err = 0;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB);

	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

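	/* Both mapping contexts hand out compact IDs for match data that is
	 * carried through packet metadata registers and translated back on
	 * the miss path: full tunnel match keys get IDs bounded by
	 * TUNNEL_INFO_BITS_MASK above, and tunnel encap options get IDs
	 * bounded by ENC_OPTS_BITS_MASK - 1 below, keeping the top value
	 * free for the reservation noted in the next comment.
	 */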
	/* 0xFFF is reserved for stack devices slow path table mark */
	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	rhashtable_destroy(tc_ht);
err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5_tc_ct_clean(uplink_priv->ct_priv);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

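/* Flows that could not be offloaded to the FDB when they were first added
 * (e.g. the eswitch was not ready for them yet) are parked on the uplink's
 * unready list; the work below retries the offload and removes the flows
 * that now succeed.
 */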
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct tc_skb_ext *tc_skb_ext;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);

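	/* Per the NIC_CHAIN_TO_REG / NIC_ZONE_RESTORE_TO_REG mappings, reg_b
	 * packs the chain restore tag in the low bits covered by
	 * MLX5E_TC_TABLE_CHAIN_TAG_MASK and the CT zone restore id above
	 * them. Assuming the 16-bit chain tag used here, an illustrative
	 * value of reg_b = 0x00050003 yields chain_tag = 3 and
	 * zone_restore_id = 5.
	 */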
	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (chain) {
		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;

		zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
				  ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}