/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en/tc/post_act.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_hairpin_params {
	struct mlx5_core_dev *mdev;
	u32 num_queues;
	u32 queue_size;
};
struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
	struct mlx5e_hairpin_params hairpin_params;
	struct dentry *dfs_root;
};
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b, which is passed to SW, since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
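/* Illustrative note, not from the original source: each entry above binds a
 * logical register id to a firmware metadata field plus a bit offset and
 * width. With the VPORT_TO_REG layout assumed above (reg_c_0, bit offset 16,
 * 16 bits wide), storing vport metadata 0x0005 would set bits 31:16 of
 * reg_c_0 to 0x0005 and leave bits 15:0 untouched.
 */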
struct mlx5e_tc_jump_state {
	u32 jump_count;
	bool jump_target;
	struct mlx5_flow_attr *jumping_attr;

	enum flow_action_id last_id;
	u32 last_index;
};
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}
/* To avoid a false lock dependency warning, give the tc_ht mutex a lock
 * class different from that of the hashtables used by flow groups: when the
 * last flow of a group is deleted and the group itself is then deleted, we
 * get into del_sw_flow_group(), which calls rhashtable_destroy() on
 * fg->ftes_hash. That takes that table's ht->mutex, which is distinct from
 * the tc_ht mutex here.
 */
static struct lock_class_key tc_ht_lock_key;
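/* Sketch of how this key is expected to be applied when the tc rhashtable is
 * initialized elsewhere in the driver (illustrative only; tc_ht_params is a
 * hypothetical name for the table parameters):
 *
 *	err = rhashtable_init(tc_ht, &tc_ht_params);
 *	if (err)
 *		return err;
 *	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
 */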
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	// move to correct offset
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	data <<= moffset;
	max_mask <<= moffset;

	// remove old data at offset
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	// add current to mask
	curr_mask |= mask;
	curr_val |= data;

	// back to be32 and write
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
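/* Worked example (illustrative): for a mapping with bit offset 16 and length
 * 16, data 0x5 and mask 0xffff become curr_val |= 0x5 << 16 and
 * curr_mask |= 0xffff << 16, so bits 15:0 of the 32-bit register match that
 * the caller may already have populated are preserved.
 */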
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
static int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has a 5-bit length field; 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}

static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		 (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
		attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;
	enum mlx5e_post_meter_type type;

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   type,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}

static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
			&attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
					       spec, attr,
					       mod_hdr_acts);
	}

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
		return;
	}

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(esw, attr);
}

int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has a 5-bit length field; 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
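/* Usage sketch (illustrative): the _set_and_get_id() variant returns the
 * index of the modify-header action it appended, which can later be handed
 * back to mlx5e_tc_match_to_reg_mod_hdr_change() to rewrite that action in
 * place, e.g. once a chain mapping becomes known:
 *
 *	int act_id;
 *
 *	act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, acts,
 *						      MLX5_FLOW_NAMESPACE_FDB,
 *						      CHAIN_TO_REG, 0);
 *	...
 *	mlx5e_tc_match_to_reg_mod_hdr_change(mdev, acts, CHAIN_TO_REG,
 *					     act_id, mapping);
 */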
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when the dead peer update
	 * event function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
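/* Typical get/put pairing (illustrative): code that may race with flow
 * deletion takes a temporary reference before touching the flow:
 *
 *	flow = mlx5e_flow_get(flow);
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 *	...
 *	mlx5e_flow_put(priv, flow);
 */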
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows that refer to it are
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to the
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}

	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
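/* Example (illustrative): peer_vhca_id 0x12 with prio 3 hashes to
 * 0x12 << 16 | 3 = 0x00120003, so entries that differ in either field get
 * distinct keys in hairpin_tbl.
 */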
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
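/* Examples (illustrative): a rule matching VLAN PCP 3 under the full 0x7
 * mask yields *match_prio = 3; a rule with no VLAN (or no PCP mask) yields
 * UNKNOWN_MATCH_PRIO; a partial mask such as 0x4 is rejected with
 * -EOPNOTSUPP.
 */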
static int debugfs_hairpin_queues_set(void *data, u64 val)
{
	struct mlx5e_hairpin_params *hp = data;

	if (!val) {
		mlx5_core_err(hp->mdev,
			      "Number of hairpin queues must be > 0\n");
		return -EINVAL;
	}

	hp->num_queues = val;

	return 0;
}

static int debugfs_hairpin_queues_get(void *data, u64 *val)
{
	struct mlx5e_hairpin_params *hp = data;

	*val = hp->num_queues;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queues, debugfs_hairpin_queues_get,
			 debugfs_hairpin_queues_set, "%llu\n");

static int debugfs_hairpin_queue_size_set(void *data, u64 val)
{
	struct mlx5e_hairpin_params *hp = data;

	if (val > BIT(MLX5_CAP_GEN(hp->mdev, log_max_hairpin_num_packets))) {
		mlx5_core_err(hp->mdev,
			      "Invalid hairpin queue size, must be <= %lu\n",
			      BIT(MLX5_CAP_GEN(hp->mdev,
					       log_max_hairpin_num_packets)));
		return -EINVAL;
	}

	hp->queue_size = roundup_pow_of_two(val);

	return 0;
}

static int debugfs_hairpin_queue_size_get(void *data, u64 *val)
{
	struct mlx5e_hairpin_params *hp = data;

	*val = hp->queue_size;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queue_size,
			 debugfs_hairpin_queue_size_get,
			 debugfs_hairpin_queue_size_set, "%llu\n");

static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
	struct mlx5e_tc_table *tc = data;
	struct mlx5e_hairpin_entry *hpe;
	u32 cnt = 0;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		cnt++;
	mutex_unlock(&tc->hairpin_tbl_lock);

	*val = cnt;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
			 debugfs_hairpin_num_active_get, NULL, "%llu\n");

static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
	struct mlx5e_tc_table *tc = file->private;
	struct mlx5e_hairpin_entry *hpe;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		seq_printf(file, "Hairpin peer_vhca_id %u prio %u refcnt %u\n",
			   hpe->peer_vhca_id, hpe->prio,
			   refcount_read(&hpe->refcnt));
	mutex_unlock(&tc->hairpin_tbl_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);
static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tc->dfs_root = debugfs_create_dir("tc", dfs_root);

	debugfs_create_file("hairpin_num_queues", 0644, tc->dfs_root,
			    &tc->hairpin_params, &fops_hairpin_queues);
	debugfs_create_file("hairpin_queue_size", 0644, tc->dfs_root,
			    &tc->hairpin_params, &fops_hairpin_queue_size);
	debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
			    &fops_hairpin_num_active);
	debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
			    &debugfs_hairpin_table_dump_fops);
}
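/* Usage sketch (illustrative; the exact path depends on where the parent
 * dentry lives, typically under /sys/kernel/debug):
 *
 *	# cat .../tc/hairpin_num_queues
 *	2
 *	# echo 4 > .../tc/hairpin_num_queues
 *	# cat .../tc/hairpin_table_dump
 *	Hairpin peer_vhca_id 18 prio 8 refcnt 1
 */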
static void
mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
			  struct mlx5_core_dev *mdev)
{
	u64 link_speed64;
	u32 link_speed;

	hairpin_params->mdev = mdev;
	/* set one hairpin queue pair for each 50Gbps share of the link speed */
	mlx5e_port_max_linkspeed(mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	hairpin_params->num_queues = link_speed64;

	hairpin_params->queue_size =
		BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev),
			  MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets)));
}
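/* Worked example (illustrative): on a 200Gbps port, 200000 / 50000 = 4
 * hairpin queues; anything at or below 50Gbps is clamped up to 50000 and
 * gets one queue. With hypothetical values of 6 for the minimum log stride
 * size and a cap of 7 for log_max_hairpin_num_packets, queue_size would be
 * BIT(min(16 - 6, 7)) = 128 packets.
 */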
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	params.log_num_packets = ilog2(tc->hairpin_params.queue_size);
	params.log_data_size =
		clamp_t(u32,
			params.log_num_packets +
				MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
			MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
			MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));

	params.q_counter = priv->q_counter;
	params.num_channels = tc->hairpin_params.num_queues;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
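/* Illustrative composition (not from the original source): a NIC rule with
 * FWD_DEST + COUNT and no hairpin ends up with dest[0] pointing at the vlan
 * flow table (or the dest_chain table) and dest[1] carrying the counter id,
 * so mlx5_add_flow_rules() above is called with dest_ix == 2.
 */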
static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		if (err)
			return err;
	}

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					CHAIN_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);
	return ERR_PTR(err);
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (flow->slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (flow->slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In the LAG case we may get devices from different eswitch
		 * instances. If we fail to get the vport number, it most
		 * likely means we are on the wrong eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}

int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   mlx5e_get_flow_namespace(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mod_hdr;

	return 0;
}
static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
		struct mlx5_flow_attr *attr,
		struct netlink_ext_ack *extack,
		bool *vf_tun)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *encap_dev = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int out_index;
	int err = 0;

	if (!mlx5e_is_eswitch_flow(flow))
		return 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto out;
		}
		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
					 extack, &encap_dev);
		dev_put(out_dev);
		if (err)
			goto out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (*vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

static void
clean_encap_dests(struct mlx5e_priv *priv,
		  struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  bool *vf_tun)
{
	struct mlx5_esw_flow_attr *esw_attr;
	int out_index;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	*vf_tun = false;
	esw_attr = attr->esw_attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		mlx5e_detach_encap(priv, flow, attr, out_index);
		kfree(attr->parse_attr->tun_info[out_index]);
	}
}
static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return -EOPNOTSUPP;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}
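/* Examples (illustrative): FWD_DEST alone or DROP alone passes; FWD_DEST
 * together with DROP fails the forward+drop check; MOD_HDR together with
 * DROP fails the last check; an action word with neither FWD_DEST nor DROP
 * fails the first check.
 */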
static int
post_process_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  bool is_post_act_attr,
		  struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
	bool vf_tun;
	int err = 0;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		goto err_out;

	err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
	if (err)
		goto err_out;

	if (mlx5e_is_eswitch_flow(flow)) {
		err = mlx5_eswitch_add_vlan_action(esw, attr);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		if (vf_tun || is_post_act_attr) {
			err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->branch_true &&
	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
		if (err)
			goto err_out;
	}

	if (attr->branch_false &&
	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
		if (err)
			goto err_out;
	}

err_out:
	return err;
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN, which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If the decap route device is an internal port, change
			 * the source vport value in reg_c0 back to uplink just
			 * in case the rule performs goto chain > 0. If we have
			 * a miss on chain > 0 we want the metadata regs to hold
			 * the chain id so SW will resume handling of this
			 * packet from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = post_process_attr(flow, attr, false, extack);
	if (err)
		goto err_out;

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	clean_encap_dests(priv, flow, attr, &vf_tun);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	free_flow_post_acts(flow);
	free_branch_attr(flow, attr->branch_true);
	free_branch_attr(flow, attr->branch_false);

	if (flow->attr->lag.count)
		mlx5_lag_del_mpesw_rule(esw->dev);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);

	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
2238 static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
2240 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2241 struct flow_action *flow_action = &rule->action;
2242 const struct flow_action_entry *act;
2248 flow_action_for_each(i, act, flow_action) {
2250 case FLOW_ACTION_GOTO:
2252 case FLOW_ACTION_SAMPLE:
2263 enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
2264 struct flow_dissector_key_enc_opts *opts,
2265 struct netlink_ext_ack *extack,
2268 struct geneve_opt *opt;
2273 while (opts->len > off) {
2274 opt = (struct geneve_opt *)&opts->data[off];
2276 if (!(*dont_care) || opt->opt_class || opt->type ||
2277 memchr_inv(opt->opt_data, 0, opt->length * 4)) {
2280 if (opt->opt_class != htons(U16_MAX) ||
2281 opt->type != U8_MAX) {
2282 NL_SET_ERR_MSG_MOD(extack,
2283 "Partial match of tunnel options in chain > 0 isn't supported");
2284 netdev_warn(priv->netdev,
2285 "Partial match of tunnel options in chain > 0 isn't supported");
2290 off += sizeof(struct geneve_opt) + opt->length * 4;
2296 #define COPY_DISSECTOR(rule, diss_key, dst)\
2297 ({ \
2298 struct flow_rule *__rule = (rule);\
2299 typeof(dst) __dst = dst;\
2301 memcpy(__dst,\
2302 skb_flow_dissector_target(__rule->match.dissector,\
2303 diss_key,\
2304 __rule->match.key),\
2305 sizeof(*__dst));\
2306 })
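/* Sketch of the flow below: the full outer tunnel match (control, addresses,
 * ip, ports, key id) plus the ingress ifindex is registered in a mapping
 * table, optionally together with the enc opts, and the resulting ids are
 * packed into one register value that is both matched on and written with a
 * mod-header action so post-decap chains can restore the tunnel info.
 */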
2308 static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
2309 struct mlx5e_tc_flow *flow,
2310 struct flow_cls_offload *f,
2311 struct net_device *filter_dev)
2313 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2314 struct netlink_ext_ack *extack = f->common.extack;
2315 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
2316 struct flow_match_enc_opts enc_opts_match;
2317 struct tunnel_match_enc_opts tun_enc_opts;
2318 struct mlx5_rep_uplink_priv *uplink_priv;
2319 struct mlx5_flow_attr *attr = flow->attr;
2320 struct mlx5e_rep_priv *uplink_rpriv;
2321 struct tunnel_match_key tunnel_key;
2322 bool enc_opts_is_dont_care = true;
2323 u32 tun_id, enc_opts_id = 0;
2324 struct mlx5_eswitch *esw;
2325 u32 value, mask;
2326 int err;
2328 esw = priv->mdev->priv.eswitch;
2329 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2330 uplink_priv = &uplink_rpriv->uplink_priv;
2332 memset(&tunnel_key, 0, sizeof(tunnel_key));
2333 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2334 &tunnel_key.enc_control);
2335 if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
2336 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
2337 &tunnel_key.enc_ipv4);
2339 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
2340 &tunnel_key.enc_ipv6);
2341 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
2342 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
2343 &tunnel_key.enc_tp);
2344 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
2345 &tunnel_key.enc_key_id);
2346 tunnel_key.filter_ifindex = filter_dev->ifindex;
2348 err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
2349 if (err)
2350 return err;
2352 flow_rule_match_enc_opts(rule, &enc_opts_match);
2353 err = enc_opts_is_dont_care_or_full_match(priv,
2354 enc_opts_match.mask,
2356 &enc_opts_is_dont_care);
2357 if (err)
2358 goto err_enc_opts;
2360 if (!enc_opts_is_dont_care) {
2361 memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
2362 memcpy(&tun_enc_opts.key, enc_opts_match.key,
2363 sizeof(*enc_opts_match.key));
2364 memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
2365 sizeof(*enc_opts_match.mask));
2367 err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
2368 &tun_enc_opts, &enc_opts_id);
2369 if (err)
2370 goto err_enc_opts;
2373 value = tun_id << ENC_OPTS_BITS | enc_opts_id;
2374 mask = enc_opts_id ? TUNNEL_ID_MASK :
2375 (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
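/* Illustrative packing (hypothetical ids, and assuming ENC_OPTS_BITS == 12):
 * tun_id = 5 with enc_opts_id = 3 gives value = (5 << 12) | 3 = 0x5003.
 * Without enc opts the low ENC_OPTS_BITS are masked out of the match so any
 * opts id is accepted.
 */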
2378 mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
2379 TUNNEL_TO_REG, value, mask);
2381 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
2382 err = mlx5e_tc_match_to_reg_set(priv->mdev,
2383 mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
2384 TUNNEL_TO_REG, value);
2385 if (err)
2386 goto err_set;
2388 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2391 flow->attr->tunnel_id = value;
2392 return 0;
2394 err_set:
2395 if (enc_opts_id)
2396 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2397 enc_opts_id);
2398 err_enc_opts:
2399 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2400 return err;
2403 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
2405 u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
2406 u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
2407 struct mlx5_rep_uplink_priv *uplink_priv;
2408 struct mlx5e_rep_priv *uplink_rpriv;
2409 struct mlx5_eswitch *esw;
2411 esw = flow->priv->mdev->priv.eswitch;
2412 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2413 uplink_priv = &uplink_rpriv->uplink_priv;
2415 if (tun_id)
2416 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2417 if (enc_opts_id)
2418 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2419 enc_opts_id);
2422 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
2423 struct flow_match_basic *match, bool outer,
2424 void *headers_c, void *headers_v)
2426 bool ip_version_cap;
2428 ip_version_cap = outer ?
2429 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2430 ft_field_support.outer_ip_version) :
2431 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2432 ft_field_support.inner_ip_version);
2434 if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
2435 (match->key->n_proto == htons(ETH_P_IP) ||
2436 match->key->n_proto == htons(ETH_P_IPV6))) {
2437 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
2438 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
2439 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
2440 } else {
2441 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
2442 ntohs(match->mask->n_proto));
2443 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
2444 ntohs(match->key->n_proto));
2448 u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
2450 void *headers_v;
2451 u16 ethertype;
2452 u8 ip_version;
2454 if (outer)
2455 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2456 else
2457 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2459 ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
2460 /* If ip_version wasn't matched directly, derive it from the ethertype */
2461 if (!ip_version) {
2462 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2463 if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
2464 ip_version = 4;
2465 else if (ethertype == ETH_P_IPV6)
2466 ip_version = 6;
2468 return ip_version;
2471 /* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
2472 * And changes inner ip_ecn depending on inner and outer ip_ecn as follows:
2473 * +---------+----------------------------------------+
2474 * |Arriving | Arriving Outer Header |
2475 * | Inner +---------+---------+---------+----------+
2476 * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
2477 * +---------+---------+---------+---------+----------+
2478 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
2479 * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
2480 * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
2481 * | CE | CE | CE | CE | CE |
2482 * +---------+---------+---------+---------+----------+
2484 * Tc matches on inner after decapsulation on tunnel device, but hw offload matches
2485 * the inner ip_ecn value before hardware decap action.
2487 * Cells marked with an asterisk (*) are changed from the original inner packet ip_ecn value
2488 * during decap, so matching those values on inner ip_ecn before decap will fail.
2490 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
2491 * except for outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
2492 * and as such we can drop the inner ip_ecn=CE match.
2495 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
2496 struct flow_cls_offload *f,
2497 bool *match_inner_ecn)
2499 u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
2500 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2501 struct netlink_ext_ack *extack = f->common.extack;
2502 struct flow_match_ip match;
2504 *match_inner_ecn = true;
2506 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
2507 flow_rule_match_enc_ip(rule, &match);
2508 outer_ecn_key = match.key->tos & INET_ECN_MASK;
2509 outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
2512 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2513 flow_rule_match_ip(rule, &match);
2514 inner_ecn_key = match.key->tos & INET_ECN_MASK;
2515 inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
2518 if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
2519 NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
2520 netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
2521 return -EOPNOTSUPP;
2524 if (!outer_ecn_mask) {
2525 if (!inner_ecn_mask)
2526 return 0;
2528 NL_SET_ERR_MSG_MOD(extack,
2529 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2530 netdev_warn(priv->netdev,
2531 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2535 if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
2536 NL_SET_ERR_MSG_MOD(extack,
2537 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2538 netdev_warn(priv->netdev,
2539 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2543 if (!inner_ecn_mask)
2544 return 0;
2546 /* Both inner and outer have full mask on ecn */
2548 if (outer_ecn_key == INET_ECN_ECT_1) {
2549 /* inner ecn might change by DECAP action */
2551 NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
2552 netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
2553 return -EOPNOTSUPP;
2556 if (outer_ecn_key != INET_ECN_CE)
2557 return 0;
2559 if (inner_ecn_key != INET_ECN_CE) {
2560 /* Can't happen in software, as packet ecn will be changed to CE after decap */
2561 NL_SET_ERR_MSG_MOD(extack,
2562 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2563 netdev_warn(priv->netdev,
2564 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2568 /* outer ecn = CE, inner ecn = CE, as decap will change inner ecn to CE in anycase,
2569 * drop match on inner ecn
2571 *match_inner_ecn = false;
2573 return 0;
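/* Worked example of the checks above (illustrative): enc_tos ecn == CE with
 * tos ecn == CE is offloadable, but only the outer CE match is programmed,
 * since decap rewrites the inner ecn to CE in hardware anyway.
 */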
2576 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2577 struct mlx5e_tc_flow *flow,
2578 struct mlx5_flow_spec *spec,
2579 struct flow_cls_offload *f,
2580 struct net_device *filter_dev,
2581 u8 *match_level,
2582 bool *match_inner)
2584 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
2585 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2586 struct netlink_ext_ack *extack = f->common.extack;
2587 bool needs_mapping, sets_mapping;
2590 if (!mlx5e_is_eswitch_flow(flow)) {
2591 NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
2595 needs_mapping = !!flow->attr->chain;
2596 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2597 *match_inner = !needs_mapping;
2599 if ((needs_mapping || sets_mapping) &&
2600 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2601 NL_SET_ERR_MSG_MOD(extack,
2602 "Chains on tunnel devices isn't supported without register loopback support");
2603 netdev_warn(priv->netdev,
2604 "Chains on tunnel devices isn't supported without register loopback support");
2608 if (!flow->attr->chain) {
2609 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2610 match_level);
2611 if (err) {
2612 NL_SET_ERR_MSG_MOD(extack,
2613 "Failed to parse tunnel attributes");
2614 netdev_warn(priv->netdev,
2615 "Failed to parse tunnel attributes");
2619 /* With mpls over udp we decapsulate using packet reformat
2620 * instead of setting the DECAP action
2621 */
2622 if (!netif_is_bareudp(filter_dev))
2623 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2624 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2625 if (err)
2626 return err;
2627 } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2628 struct mlx5_flow_spec *tmp_spec;
2630 tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
2631 if (!tmp_spec) {
2632 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
2633 netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
2634 return -ENOMEM;
2635 }
2636 memcpy(tmp_spec, spec, sizeof(*tmp_spec));
2638 err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
2639 if (err) {
2640 kvfree(tmp_spec);
2641 NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
2642 netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
2643 return err;
2644 }
2645 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2646 kvfree(tmp_spec);
2647 if (err)
2648 return err;
2651 if (!needs_mapping && !sets_mapping)
2652 return 0;
2654 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2657 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2659 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2663 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2665 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2669 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2671 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2675 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2677 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2681 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
2683 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2684 get_match_inner_headers_value(spec) :
2685 get_match_outer_headers_value(spec);
2688 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
2690 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2691 get_match_inner_headers_criteria(spec) :
2692 get_match_outer_headers_criteria(spec);
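/* Usage note: when the rule carries the DECAP action the tc match keys
 * describe the packet after decapsulation, so these helpers steer them to the
 * inner headers of the flow spec; otherwise the outer headers are used.
 */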
2695 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2696 struct flow_cls_offload *f)
2698 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2699 struct netlink_ext_ack *extack = f->common.extack;
2700 struct net_device *ingress_dev;
2701 struct flow_match_meta match;
2703 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2704 return 0;
2706 flow_rule_match_meta(rule, &match);
2707 if (!match.mask->ingress_ifindex)
2708 return 0;
2710 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2711 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2715 ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2716 match.key->ingress_ifindex);
2717 if (!ingress_dev) {
2718 NL_SET_ERR_MSG_MOD(extack,
2719 "Can't find the ingress port to match on");
2720 return -ENOENT;
2721 }
2723 if (ingress_dev != filter_dev) {
2724 NL_SET_ERR_MSG_MOD(extack,
2725 "Can't match on the ingress filter port");
2732 static bool skip_key_basic(struct net_device *filter_dev,
2733 struct flow_cls_offload *f)
2735 /* When doing mpls over udp decap, the user needs to provide
2736 * MPLS_UC as the protocol in order to be able to match on mpls
2737 * label fields. However, the actual ethertype is IP so we want to
2738 * avoid matching on this, otherwise we'll fail the match.
2740 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2741 return true;
2743 return false;
2746 static int __parse_cls_flower(struct mlx5e_priv *priv,
2747 struct mlx5e_tc_flow *flow,
2748 struct mlx5_flow_spec *spec,
2749 struct flow_cls_offload *f,
2750 struct net_device *filter_dev,
2751 u8 *inner_match_level, u8 *outer_match_level)
2753 struct netlink_ext_ack *extack = f->common.extack;
2754 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2755 outer_headers);
2756 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2757 outer_headers);
2758 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2759 misc_parameters);
2760 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2761 misc_parameters);
2762 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2763 misc_parameters_3);
2764 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2765 misc_parameters_3);
2766 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2767 struct flow_dissector *dissector = rule->match.dissector;
2768 enum fs_flow_table_type fs_type;
2769 bool match_inner_ecn = true;
2770 u16 addr_type = 0;
2771 u8 ip_proto = 0;
2772 u8 *match_level;
2773 int err;
2775 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2776 match_level = outer_match_level;
2778 if (dissector->used_keys &
2779 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2780 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2781 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2782 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2783 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2784 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2785 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2786 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2787 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2788 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2789 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2790 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2791 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2792 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2793 BIT(FLOW_DISSECTOR_KEY_TCP) |
2794 BIT(FLOW_DISSECTOR_KEY_IP) |
2795 BIT(FLOW_DISSECTOR_KEY_CT) |
2796 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2797 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2798 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2799 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2800 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2801 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2802 dissector->used_keys);
2803 return -EOPNOTSUPP;
2806 if (mlx5e_get_tc_tun(filter_dev)) {
2807 bool match_inner = false;
2809 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2810 outer_match_level, &match_inner);
2811 if (err)
2812 return err;
2815 /* header pointers should point to the inner headers
2816 * if the packet was decapsulated already.
2817 * outer headers are set by parse_tunnel_attr.
2819 match_level = inner_match_level;
2820 headers_c = get_match_inner_headers_criteria(spec);
2821 headers_v = get_match_inner_headers_value(spec);
2824 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2825 if (err)
2826 return err;
2829 err = mlx5e_flower_parse_meta(filter_dev, f);
2830 if (err)
2831 return err;
2833 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2834 !skip_key_basic(filter_dev, f)) {
2835 struct flow_match_basic match;
2837 flow_rule_match_basic(rule, &match);
2838 mlx5e_tc_set_ethertype(priv->mdev, &match,
2839 match_level == outer_match_level,
2840 headers_c, headers_v);
2842 if (match.mask->n_proto)
2843 *match_level = MLX5_MATCH_L2;
2845 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2846 is_vlan_dev(filter_dev)) {
2847 struct flow_dissector_key_vlan filter_dev_mask;
2848 struct flow_dissector_key_vlan filter_dev_key;
2849 struct flow_match_vlan match;
2851 if (is_vlan_dev(filter_dev)) {
2852 match.key = &filter_dev_key;
2853 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2854 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2855 match.key->vlan_priority = 0;
2856 match.mask = &filter_dev_mask;
2857 memset(match.mask, 0xff, sizeof(*match.mask));
2858 match.mask->vlan_priority = 0;
2859 } else {
2860 flow_rule_match_vlan(rule, &match);
2861 }
2862 if (match.mask->vlan_id ||
2863 match.mask->vlan_priority ||
2864 match.mask->vlan_tpid) {
2865 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2866 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2867 svlan_tag, 1);
2868 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2869 svlan_tag, 1);
2870 } else {
2871 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2872 cvlan_tag, 1);
2873 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2874 cvlan_tag, 1);
2875 }
2877 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2878 match.mask->vlan_id);
2879 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2880 match.key->vlan_id);
2882 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2883 match.mask->vlan_priority);
2884 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2885 match.key->vlan_priority);
2887 *match_level = MLX5_MATCH_L2;
2889 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2890 match.mask->vlan_eth_type &&
2891 MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2892 ft_field_support.outer_second_vid,
2893 fs_type)) {
2894 MLX5_SET(fte_match_set_misc, misc_c,
2895 outer_second_cvlan_tag, 1);
2896 spec->match_criteria_enable |=
2897 MLX5_MATCH_MISC_PARAMETERS;
2900 } else if (*match_level != MLX5_MATCH_NONE) {
2901 /* cvlan_tag enabled in match criteria and
2902 * disabled in match value means both S & C tags
2903 * don't exist (i.e. the packet is untagged on both)
2905 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2906 *match_level = MLX5_MATCH_L2;
2909 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2910 struct flow_match_vlan match;
2912 flow_rule_match_cvlan(rule, &match);
2913 if (match.mask->vlan_id ||
2914 match.mask->vlan_priority ||
2915 match.mask->vlan_tpid) {
2916 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2917 fs_type)) {
2918 NL_SET_ERR_MSG_MOD(extack,
2919 "Matching on CVLAN is not supported");
2920 return -EOPNOTSUPP;
2921 }
2923 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2924 MLX5_SET(fte_match_set_misc, misc_c,
2925 outer_second_svlan_tag, 1);
2926 MLX5_SET(fte_match_set_misc, misc_v,
2927 outer_second_svlan_tag, 1);
2929 MLX5_SET(fte_match_set_misc, misc_c,
2930 outer_second_cvlan_tag, 1);
2931 MLX5_SET(fte_match_set_misc, misc_v,
2932 outer_second_cvlan_tag, 1);
2935 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2936 match.mask->vlan_id);
2937 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2938 match.key->vlan_id);
2939 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2940 match.mask->vlan_priority);
2941 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2942 match.key->vlan_priority);
2944 *match_level = MLX5_MATCH_L2;
2945 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2949 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2950 struct flow_match_eth_addrs match;
2952 flow_rule_match_eth_addrs(rule, &match);
2953 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2954 dmac_47_16),
2955 match.mask->dst);
2956 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2957 dmac_47_16),
2958 match.key->dst);
2960 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2961 smac_47_16),
2962 match.mask->src);
2963 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2964 smac_47_16),
2965 match.key->src);
2967 if (!is_zero_ether_addr(match.mask->src) ||
2968 !is_zero_ether_addr(match.mask->dst))
2969 *match_level = MLX5_MATCH_L2;
2972 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2973 struct flow_match_control match;
2975 flow_rule_match_control(rule, &match);
2976 addr_type = match.key->addr_type;
2978 /* the HW doesn't support frag first/later */
2979 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2980 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2984 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2985 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2986 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2987 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2989 /* the HW doesn't need L3 inline to match on frag=no */
2990 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2991 *match_level = MLX5_MATCH_L2;
2992 /* *** L2 attributes parsing up to here *** */
2993 else
2994 *match_level = MLX5_MATCH_L3;
2998 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2999 struct flow_match_basic match;
3001 flow_rule_match_basic(rule, &match);
3002 ip_proto = match.key->ip_proto;
3004 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
3005 match.mask->ip_proto);
3006 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3007 match.key->ip_proto);
3009 if (match.mask->ip_proto)
3010 *match_level = MLX5_MATCH_L3;
3013 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3014 struct flow_match_ipv4_addrs match;
3016 flow_rule_match_ipv4_addrs(rule, &match);
3017 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
3018 src_ipv4_src_ipv6.ipv4_layout.ipv4),
3019 &match.mask->src, sizeof(match.mask->src));
3020 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3021 src_ipv4_src_ipv6.ipv4_layout.ipv4),
3022 &match.key->src, sizeof(match.key->src));
3023 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
3024 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3025 &match.mask->dst, sizeof(match.mask->dst));
3026 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3027 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3028 &match.key->dst, sizeof(match.key->dst));
3030 if (match.mask->src || match.mask->dst)
3031 *match_level = MLX5_MATCH_L3;
3034 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3035 struct flow_match_ipv6_addrs match;
3037 flow_rule_match_ipv6_addrs(rule, &match);
3038 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
3039 src_ipv4_src_ipv6.ipv6_layout.ipv6),
3040 &match.mask->src, sizeof(match.mask->src));
3041 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3042 src_ipv4_src_ipv6.ipv6_layout.ipv6),
3043 &match.key->src, sizeof(match.key->src));
3045 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
3046 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
3047 &match.mask->dst, sizeof(match.mask->dst));
3048 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3049 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
3050 &match.key->dst, sizeof(match.key->dst));
3052 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
3053 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
3054 *match_level = MLX5_MATCH_L3;
3057 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
3058 struct flow_match_ip match;
3060 flow_rule_match_ip(rule, &match);
3061 if (match_inner_ecn) {
3062 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
3063 match.mask->tos & 0x3);
3064 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
3065 match.key->tos & 0x3);
3068 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
3069 match.mask->tos >> 2);
3070 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
3071 match.key->tos >> 2);
3073 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
3074 match.mask->ttl);
3075 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
3076 match.key->ttl);
3078 if (match.mask->ttl &&
3079 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3080 ft_field_support.outer_ipv4_ttl)) {
3081 NL_SET_ERR_MSG_MOD(extack,
3082 "Matching on TTL is not supported");
3086 if (match.mask->tos || match.mask->ttl)
3087 *match_level = MLX5_MATCH_L3;
3090 /* *** L3 attributes parsing up to here *** */
3092 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3093 struct flow_match_ports match;
3095 flow_rule_match_ports(rule, &match);
3096 switch (ip_proto) {
3097 case IPPROTO_TCP:
3098 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
3099 tcp_sport, ntohs(match.mask->src));
3100 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
3101 tcp_sport, ntohs(match.key->src));
3103 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
3104 tcp_dport, ntohs(match.mask->dst));
3105 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
3106 tcp_dport, ntohs(match.key->dst));
3107 break;
3108 case IPPROTO_UDP:
3110 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
3111 udp_sport, ntohs(match.mask->src));
3112 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
3113 udp_sport, ntohs(match.key->src));
3115 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
3116 udp_dport, ntohs(match.mask->dst));
3117 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
3118 udp_dport, ntohs(match.key->dst));
3119 break;
3120 default:
3121 NL_SET_ERR_MSG_MOD(extack,
3122 "Only UDP and TCP transports are supported for L4 matching");
3123 netdev_err(priv->netdev,
3124 "Only UDP and TCP transport are supported\n");
3128 if (match.mask->src || match.mask->dst)
3129 *match_level = MLX5_MATCH_L4;
3132 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
3133 struct flow_match_tcp match;
3135 flow_rule_match_tcp(rule, &match);
3136 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
3137 ntohs(match.mask->flags));
3138 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3139 ntohs(match.key->flags));
3141 if (match.mask->flags)
3142 *match_level = MLX5_MATCH_L4;
3144 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
3145 struct flow_match_icmp match;
3147 flow_rule_match_icmp(rule, &match);
3148 switch (ip_proto) {
3149 case IPPROTO_ICMP:
3150 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
3151 MLX5_FLEX_PROTO_ICMP)) {
3152 NL_SET_ERR_MSG_MOD(extack,
3153 "Match on Flex protocols for ICMP is not supported");
3156 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
3157 match.mask->type);
3158 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
3159 match.key->type);
3160 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
3161 match.mask->code);
3162 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
3163 match.key->code);
3164 break;
3165 case IPPROTO_ICMPV6:
3166 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
3167 MLX5_FLEX_PROTO_ICMPV6)) {
3168 NL_SET_ERR_MSG_MOD(extack,
3169 "Match on Flex protocols for ICMPV6 is not supported");
3172 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
3174 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
3176 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
3178 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
3182 NL_SET_ERR_MSG_MOD(extack,
3183 "Code and type matching only with ICMP and ICMPv6");
3184 netdev_err(priv->netdev,
3185 "Code and type matching only with ICMP and ICMPv6\n");
3188 if (match.mask->code || match.mask->type) {
3189 *match_level = MLX5_MATCH_L4;
3190 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
3193 /* Currently supported only for MPLS over UDP */
3194 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
3195 !netif_is_bareudp(filter_dev)) {
3196 NL_SET_ERR_MSG_MOD(extack,
3197 "Matching on MPLS is supported only for MPLS over UDP");
3198 netdev_err(priv->netdev,
3199 "Matching on MPLS is supported only for MPLS over UDP\n");
3206 static int parse_cls_flower(struct mlx5e_priv *priv,
3207 struct mlx5e_tc_flow *flow,
3208 struct mlx5_flow_spec *spec,
3209 struct flow_cls_offload *f,
3210 struct net_device *filter_dev)
3212 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
3213 struct netlink_ext_ack *extack = f->common.extack;
3214 struct mlx5_core_dev *dev = priv->mdev;
3215 struct mlx5_eswitch *esw = dev->priv.eswitch;
3216 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3217 struct mlx5_eswitch_rep *rep;
3218 bool is_eswitch_flow;
3221 inner_match_level = MLX5_MATCH_NONE;
3222 outer_match_level = MLX5_MATCH_NONE;
3224 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
3225 &inner_match_level, &outer_match_level);
3226 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
3227 outer_match_level : inner_match_level;
3229 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
3230 if (!err && is_eswitch_flow) {
3231 rep = rpriv->rep;
3232 if (rep->vport != MLX5_VPORT_UPLINK &&
3233 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
3234 esw->offloads.inline_mode < non_tunnel_match_level)) {
3235 NL_SET_ERR_MSG_MOD(extack,
3236 "Flow is not offloaded due to min inline setting");
3237 netdev_warn(priv->netdev,
3238 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
3239 non_tunnel_match_level, esw->offloads.inline_mode);
3240 return -EOPNOTSUPP;
3244 flow->attr->inner_match_level = inner_match_level;
3245 flow->attr->outer_match_level = outer_match_level;
3247 return err;
3251 struct mlx5_fields {
3252 u8 field;
3253 u8 field_bsize;
3254 u32 field_mask;
3255 u32 offset;
3256 u32 match_offset;
3257 };
3259 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
3260 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
3261 offsetof(struct pedit_headers, field) + (off), \
3262 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
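/* For illustration, OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype)
 * expands to a row tying the pedit offset of eth.h_proto to the ethertype
 * field of the FTE match layout, with a 16-bit size and an all-ones mask.
 */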
3264 /* masked values are the same and there are no rewrites that do not have a
3265 * match
3266 */
3267 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
3268 type matchmaskx = *(type *)(matchmaskp); \
3269 type matchvalx = *(type *)(matchvalp); \
3270 type maskx = *(type *)(maskp); \
3271 type valx = *(type *)(valp); \
3273 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
3274 matchmaskx)); \
3275 })
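/* Hedged worked example: with val 0x0a, mask 0xff and a match of value 0x0a
 * under mask 0xff, the macro yields true - the rewrite would store the value
 * the rule already matched on and can be skipped. If the rewrite mask covers
 * bits the match mask does not (maskx & (maskx ^ matchmaskx) != 0), it yields
 * false and the rewrite is kept.
 */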
3277 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
3278 void *matchmaskp, u8 bsize)
3280 bool same = false;
3282 switch (bsize) {
3283 case 8:
3284 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
3285 break;
3286 case 16:
3287 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
3288 break;
3289 case 32:
3290 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
3291 break;
3292 }
3294 return same;
3297 static struct mlx5_fields fields[] = {
3298 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
3299 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
3300 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
3301 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
3302 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
3303 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
3305 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
3306 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
3307 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
3308 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3310 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
3311 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
3312 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
3313 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
3314 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
3315 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
3316 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
3317 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
3318 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
3319 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
3320 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
3321 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
3322 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
3323 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
3324 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
3325 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
3326 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
3327 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
3329 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
3330 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
3331 /* in linux tcphdr tcp_flags is 8 bits long */
3332 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
3334 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
3335 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
3338 static unsigned long mask_to_le(unsigned long mask, int size)
3340 __be32 mask_be32;
3341 __be16 mask_be16;
3343 if (size == 32) {
3344 mask_be32 = (__force __be32)(mask);
3345 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
3346 } else if (size == 16) {
3347 mask_be32 = (__force __be32)(mask);
3348 mask_be16 = *(__be16 *)&mask_be32;
3349 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
3352 return mask;
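/* Example (illustrative): for size == 32, a big-endian mask 0xff000000 comes
 * out as 0x000000ff, so the bit scans in offload_pedit_fields() below operate
 * on the little-endian layout the device expects.
 */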
3355 static int offload_pedit_fields(struct mlx5e_priv *priv,
3356 int namespace,
3357 struct mlx5e_tc_flow_parse_attr *parse_attr,
3358 u32 *action_flags,
3359 struct netlink_ext_ack *extack)
3361 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
3362 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3363 void *headers_c, *headers_v, *action, *vals_p;
3364 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
3365 struct mlx5e_tc_mod_hdr_acts *mod_acts;
3366 unsigned long mask, field_mask;
3367 int i, first, last, next_z;
3368 struct mlx5_fields *f;
3369 u8 cmd;
3371 mod_acts = &parse_attr->mod_hdr_acts;
3372 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
3373 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
3375 set_masks = &hdrs[0].masks;
3376 add_masks = &hdrs[1].masks;
3377 set_vals = &hdrs[0].vals;
3378 add_vals = &hdrs[1].vals;
3380 for (i = 0; i < ARRAY_SIZE(fields); i++) {
3381 bool skip;
3383 f = &fields[i];
3384 /* avoid seeing bits set from previous iterations */
3385 s_mask = 0;
3386 a_mask = 0;
3388 s_masks_p = (void *)set_masks + f->offset;
3389 a_masks_p = (void *)add_masks + f->offset;
3391 s_mask = *s_masks_p & f->field_mask;
3392 a_mask = *a_masks_p & f->field_mask;
3394 if (!s_mask && !a_mask) /* nothing to offload here */
3395 continue;
3397 if (s_mask && a_mask) {
3398 NL_SET_ERR_MSG_MOD(extack,
3399 "can't set and add to the same HW field");
3400 netdev_warn(priv->netdev,
3401 "mlx5: can't set and add to the same HW field (%x)\n",
3405 skip = false;
3406 if (s_mask) {
3408 void *match_mask = headers_c + f->match_offset;
3409 void *match_val = headers_v + f->match_offset;
3411 cmd = MLX5_ACTION_TYPE_SET;
3412 mask = s_mask;
3413 vals_p = (void *)set_vals + f->offset;
3414 /* don't rewrite if we have a match on the same value */
3415 if (cmp_val_mask(vals_p, s_masks_p, match_val,
3416 match_mask, f->field_bsize))
3417 skip = true;
3418 /* clear to denote we consumed this field */
3419 *s_masks_p &= ~f->field_mask;
3420 } else {
3421 cmd = MLX5_ACTION_TYPE_ADD;
3422 mask = a_mask;
3423 vals_p = (void *)add_vals + f->offset;
3424 /* add 0 is no change */
3425 if ((*(u32 *)vals_p & f->field_mask) == 0)
3426 skip = true;
3427 /* clear to denote we consumed this field */
3428 *a_masks_p &= ~f->field_mask;
3429 }
3430 if (skip)
3431 continue;
3433 mask = mask_to_le(mask, f->field_bsize);
3435 first = find_first_bit(&mask, f->field_bsize);
3436 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
3437 last = find_last_bit(&mask, f->field_bsize);
3438 if (first < next_z && next_z < last) {
3439 NL_SET_ERR_MSG_MOD(extack,
3440 "rewrite of few sub-fields isn't supported");
3441 netdev_warn(priv->netdev,
3442 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
3447 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
3448 if (IS_ERR(action)) {
3449 NL_SET_ERR_MSG_MOD(extack,
3450 "too many pedit actions, can't offload");
3451 mlx5_core_warn(priv->mdev,
3452 "mlx5: parsed %d pedit actions, can't do more\n",
3453 mod_acts->num_actions);
3454 return PTR_ERR(action);
3457 MLX5_SET(set_action_in, action, action_type, cmd);
3458 MLX5_SET(set_action_in, action, field, f->field);
3460 if (cmd == MLX5_ACTION_TYPE_SET) {
3461 int start;
3463 field_mask = mask_to_le(f->field_mask, f->field_bsize);
3465 /* if field is bit sized it can start not from first bit */
3466 start = find_first_bit(&field_mask, f->field_bsize);
3468 MLX5_SET(set_action_in, action, offset, first - start);
3469 /* length is num of bits to be written, zero means length of 32 */
3470 MLX5_SET(set_action_in, action, length, (last - first + 1));
3473 if (f->field_bsize == 32)
3474 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
3475 else if (f->field_bsize == 16)
3476 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
3477 else if (f->field_bsize == 8)
3478 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
3480 ++mod_acts->num_actions;
3483 return 0;
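/* A small illustrative case for the function above: setting first_vid with
 * mask 0xfff gives first = 0, last = 11 and next_z = 12, so the emitted
 * set_action_in uses offset 0 and length 12; a mask with a hole (e.g. 0xf0f)
 * is rejected as a multi-sub-field rewrite.
 */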
3486 static const struct pedit_headers zero_masks = {};
3488 static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
3489 struct mlx5e_tc_flow_parse_attr *parse_attr,
3490 struct netlink_ext_ack *extack)
3492 struct pedit_headers *cmd_masks;
3493 int cmd;
3495 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3496 cmd_masks = &parse_attr->hdrs[cmd].masks;
3497 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3498 NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
3499 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3500 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3501 16, 1, cmd_masks, sizeof(zero_masks), true);
3502 return -EOPNOTSUPP;
3507 return 0;
3509 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3510 struct mlx5e_tc_flow_parse_attr *parse_attr,
3511 u32 *action_flags,
3512 struct netlink_ext_ack *extack)
3516 err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
3518 goto out_dealloc_parsed_actions;
3520 err = verify_offload_pedit_fields(priv, parse_attr, extack);
3522 goto out_dealloc_parsed_actions;
3524 return 0;
3526 out_dealloc_parsed_actions:
3527 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3528 return err;
3531 struct ip_ttl_word {
3532 __u8 ttl;
3533 __u8 protocol;
3534 __sum16 check;
3535 };
3537 struct ipv6_hoplimit_word {
3538 __be16 payload_len;
3539 __u8 nexthdr;
3540 __u8 hop_limit;
3541 };
3543 static bool
3544 is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
3545 bool *modify_ip_header, bool *modify_tuple,
3546 struct netlink_ext_ack *extack)
3548 u32 mask, offset;
3549 u8 htype;
3551 htype = act->mangle.htype;
3552 offset = act->mangle.offset;
3553 mask = ~act->mangle.mask;
3554 /* For IPv4 & IPv6 header check 4 byte word,
3555 * to determine that modified fields
3556 * are NOT ttl & hop_limit only.
3558 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3559 struct ip_ttl_word *ttl_word =
3560 (struct ip_ttl_word *)&mask;
3562 if (offset != offsetof(struct iphdr, ttl) ||
3563 ttl_word->protocol ||
3564 ttl_word->check) {
3565 *modify_ip_header = true;
3566 }
3568 if (offset >= offsetof(struct iphdr, saddr))
3569 *modify_tuple = true;
3571 if (ct_flow && *modify_tuple) {
3572 NL_SET_ERR_MSG_MOD(extack,
3573 "can't offload re-write of ipv4 address with action ct");
3576 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3577 struct ipv6_hoplimit_word *hoplimit_word =
3578 (struct ipv6_hoplimit_word *)&mask;
3580 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3581 hoplimit_word->payload_len ||
3582 hoplimit_word->nexthdr) {
3583 *modify_ip_header = true;
3586 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3587 *modify_tuple = true;
3589 if (ct_flow && *modify_tuple) {
3590 NL_SET_ERR_MSG_MOD(extack,
3591 "can't offload re-write of ipv6 address with action ct");
3594 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3595 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3596 *modify_tuple = true;
3597 if (ct_flow) {
3598 NL_SET_ERR_MSG_MOD(extack,
3599 "can't offload re-write of transport header ports with action ct");
3607 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3608 bool ct_flow, struct netlink_ext_ack *extack,
3609 struct mlx5e_priv *priv,
3610 struct mlx5_flow_spec *spec)
3612 if (!modify_tuple || ct_clear)
3613 return true;
3615 if (ct_flow) {
3616 NL_SET_ERR_MSG_MOD(extack,
3617 "can't offload tuple modification with non-clear ct()");
3618 netdev_info(priv->netdev,
3619 "can't offload tuple modification with non-clear ct()");
3623 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3624 * (or after clear action), as otherwise, since the tuple is changed,
3625 * we can't restore ct state
3627 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3628 NL_SET_ERR_MSG_MOD(extack,
3629 "can't offload tuple modification with ct matches and no ct(clear) action");
3630 netdev_info(priv->netdev,
3631 "can't offload tuple modification with ct matches and no ct(clear) action");
3638 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3639 struct mlx5_flow_spec *spec,
3640 struct flow_action *flow_action,
3641 u32 actions, bool ct_flow,
3642 bool ct_clear,
3643 struct netlink_ext_ack *extack)
3645 const struct flow_action_entry *act;
3646 bool modify_ip_header, modify_tuple;
3647 void *headers_c;
3648 void *headers_v;
3649 u16 ethertype;
3650 u8 ip_proto;
3651 int i;
3653 headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3654 headers_v = mlx5e_get_match_headers_value(actions, spec);
3655 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3657 /* for non-IP we only re-write MACs, so we're okay */
3658 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3659 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3660 return true;
3662 modify_ip_header = false;
3663 modify_tuple = false;
3664 flow_action_for_each(i, act, flow_action) {
3665 if (act->id != FLOW_ACTION_MANGLE &&
3666 act->id != FLOW_ACTION_ADD)
3667 continue;
3669 if (!is_action_keys_supported(act, ct_flow,
3671 &modify_tuple, extack))
3672 return false;
3675 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3676 priv, spec))
3677 return false;
3679 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3680 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3681 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3682 NL_SET_ERR_MSG_MOD(extack,
3683 "can't offload re-write of non TCP/UDP");
3684 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3693 static bool
3694 actions_match_supported_fdb(struct mlx5e_priv *priv,
3695 struct mlx5e_tc_flow_parse_attr *parse_attr,
3696 struct mlx5e_tc_flow *flow,
3697 struct netlink_ext_ack *extack)
3699 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3700 bool ct_flow, ct_clear;
3702 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3703 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3705 if (esw_attr->split_count && ct_flow &&
3706 !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
3707 /* All registers used by ct are cleared when using
3708 * split rules.
3709 */
3710 NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
3711 return false;
3714 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3715 NL_SET_ERR_MSG_MOD(extack,
3716 "current firmware doesn't support split rule for port mirroring");
3717 netdev_warn_once(priv->netdev,
3718 "current firmware doesn't support split rule for port mirroring\n");
3725 static bool
3726 actions_match_supported(struct mlx5e_priv *priv,
3727 struct flow_action *flow_action,
3728 u32 actions,
3729 struct mlx5e_tc_flow_parse_attr *parse_attr,
3730 struct mlx5e_tc_flow *flow,
3731 struct netlink_ext_ack *extack)
3733 bool ct_flow, ct_clear;
3735 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3736 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3738 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3739 !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
3740 actions, ct_flow, ct_clear, extack))
3741 return false;
3743 if (mlx5e_is_eswitch_flow(flow) &&
3744 !actions_match_supported_fdb(priv, parse_attr, flow, extack))
3745 return false;
3747 return true;
3750 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3752 return priv->mdev == peer_priv->mdev;
3755 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3757 struct mlx5_core_dev *fmdev, *pmdev;
3758 u64 fsystem_guid, psystem_guid;
3760 fmdev = priv->mdev;
3761 pmdev = peer_priv->mdev;
3763 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3764 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3766 return (fsystem_guid == psystem_guid);
3769 static int
3770 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3771 struct mlx5e_tc_flow *flow,
3772 struct mlx5_flow_attr *attr,
3773 struct netlink_ext_ack *extack)
3775 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3776 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3777 enum mlx5_flow_namespace_type ns_type;
3780 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3781 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3782 return 0;
3784 ns_type = mlx5e_get_flow_namespace(flow);
3786 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
3787 if (err)
3788 return err;
3790 if (parse_attr->mod_hdr_acts.num_actions > 0)
3791 return 0;
3793 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3794 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3795 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3797 if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3798 return 0;
3800 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3801 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3802 attr->esw_attr->split_count = 0;
3804 return 0;
3807 static struct mlx5_flow_attr*
3808 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
3809 enum mlx5_flow_namespace_type ns_type)
3811 struct mlx5e_tc_flow_parse_attr *parse_attr;
3812 u32 attr_sz = ns_to_attr_sz(ns_type);
3813 struct mlx5_flow_attr *attr2;
3815 attr2 = mlx5_alloc_flow_attr(ns_type);
3816 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3817 if (!attr2 || !parse_attr) {
3818 kvfree(parse_attr);
3819 kfree(attr2);
3820 return NULL;
3821 }
3823 memcpy(attr2, attr, attr_sz);
3824 INIT_LIST_HEAD(&attr2->list);
3825 parse_attr->filter_dev = attr->parse_attr->filter_dev;
3826 attr2->action = 0;
3827 attr2->flags = 0;
3828 attr2->parse_attr = parse_attr;
3829 attr2->dest_chain = 0;
3830 attr2->dest_ft = NULL;
3832 if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3833 attr2->esw_attr->out_count = 0;
3834 attr2->esw_attr->split_count = 0;
3837 attr2->branch_true = NULL;
3838 attr2->branch_false = NULL;
3839 attr2->jumping_attr = NULL;
3840 return attr2;
3843 struct mlx5_flow_attr *
3844 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3846 struct mlx5_esw_flow_attr *esw_attr;
3847 struct mlx5_flow_attr *attr;
3850 list_for_each_entry(attr, &flow->attrs, list) {
3851 esw_attr = attr->esw_attr;
3852 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
3853 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
3854 return attr;
3858 return NULL;
3862 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3864 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3865 struct mlx5_flow_attr *attr;
3867 list_for_each_entry(attr, &flow->attrs, list) {
3868 if (list_is_last(&attr->list, &flow->attrs))
3869 break;
3871 mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
3876 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3878 struct mlx5_flow_attr *attr, *tmp;
3880 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3881 if (list_is_last(&attr->list, &flow->attrs))
3882 break;
3884 mlx5_free_flow_attr(flow, attr);
3885 free_branch_attr(flow, attr->branch_true);
3886 free_branch_attr(flow, attr->branch_false);
3888 list_del(&attr->list);
3889 kvfree(attr->parse_attr);
3890 kfree(attr);
3895 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3897 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3898 struct mlx5_flow_attr *attr;
3901 list_for_each_entry(attr, &flow->attrs, list) {
3902 if (list_is_last(&attr->list, &flow->attrs))
3903 break;
3905 err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
3906 if (err)
3907 break;
3910 return err;
3913 /* TC filter rule HW translation:
3915 * +---------------------+
3916 * + ft prio (tc chain) +
3917 * + original match +
3918 * +---------------------+
3919 * |
3920 * | if multi table action
3921 * |
3922 * v
3923 * +---------------------+
3924 * + post act ft |<----.
3925 * + match fte id | | split on multi table action
3926 * + do actions |-----'
3927 * +---------------------+
3928 * |
3929 * v
3931 * Do rest of the actions after last multi table action.
3932 */
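/* As a sketch: a rule such as "ct, then mirred" is split here - the original
 * table matches the packet and runs ct, while the post act table matches the
 * fte id and executes the remaining forwarding action.
 */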
3934 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
3936 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3937 struct mlx5_flow_attr *attr, *next_attr = NULL;
3938 struct mlx5e_post_act_handle *handle;
3941 /* This is going in reverse order as needed.
3942 * The first entry is the last attribute.
3944 list_for_each_entry(attr, &flow->attrs, list) {
3945 if (!next_attr) {
3946 /* Set counter action on last post act rule. */
3947 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3948 }
3950 if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
3951 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3952 if (err)
3953 goto out_free;
3956 /* Don't add post_act rule for first attr (last in the list).
3957 * It's being handled by the caller.
3959 if (list_is_last(&attr->list, &flow->attrs))
3960 break;
3962 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3963 if (err)
3964 goto out_free;
3966 err = post_process_attr(flow, attr, true, extack);
3967 if (err)
3968 goto out_free;
3970 handle = mlx5e_tc_post_act_add(post_act, attr);
3971 if (IS_ERR(handle)) {
3972 err = PTR_ERR(handle);
3973 goto out_free;
3974 }
3976 attr->post_act_handle = handle;
3978 if (attr->jumping_attr) {
3979 err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
3980 if (err)
3981 goto out_free;
3982 }
3984 next_attr = attr;
3987 if (flow_flag_test(flow, SLOW))
3988 goto out;
3990 err = mlx5e_tc_offload_flow_post_acts(flow);
3991 if (err)
3992 goto out_free;
3994 out:
3995 return 0;
3997 out_free:
3998 free_flow_post_acts(flow);
3999 return err;
4002 static int
4003 alloc_branch_attr(struct mlx5e_tc_flow *flow,
4004 struct mlx5e_tc_act_branch_ctrl *cond,
4005 struct mlx5_flow_attr **cond_attr,
4006 u32 *jump_count,
4007 struct netlink_ext_ack *extack)
4009 struct mlx5_flow_attr *attr;
4012 *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
4013 mlx5e_get_flow_namespace(flow));
4014 if (!(*cond_attr))
4015 return -ENOMEM;
4017 attr = *cond_attr;
4019 switch (cond->act_id) {
4020 case FLOW_ACTION_DROP:
4021 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
4022 break;
4023 case FLOW_ACTION_ACCEPT:
4024 case FLOW_ACTION_PIPE:
4025 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4026 attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
4027 break;
4028 case FLOW_ACTION_JUMP:
4029 if (*jump_count) {
4030 NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
4034 *jump_count = cond->extval;
4035 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4036 attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
4037 break;
4038 default:
4039 err = -EOPNOTSUPP;
4040 goto out_err;
4041 }
4043 return 0;
4045 out_err:
4046 kfree(*cond_attr);
4047 *cond_attr = NULL;
4048 return err;
4050 static void
4051 dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
4052 struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
4053 struct mlx5e_tc_jump_state *jump_state)
4055 if (!jump_state->jump_count)
4056 return;
4058 /* Single tc action can instantiate multiple offload actions (e.g. pedit)
4059 * Jump only over a tc action
4061 if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
4062 return;
4064 jump_state->last_id = act->id;
4065 jump_state->last_index = act->hw_index;
4067 /* nothing to do for intermediate actions */
4068 if (--jump_state->jump_count > 1)
4069 return;
4071 if (jump_state->jump_count == 1) { /* last action in the jump action list */
4073 /* create a new attribute after this action */
4074 jump_state->jump_target = true;
4076 if (tc_act->is_terminating_action) { /* the branch ends here */
4077 attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
4078 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4079 } else { /* the branch continues executing the rest of the actions */
4080 struct mlx5e_post_act *post_act;
4082 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4083 post_act = get_post_action(priv);
4084 attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
4086 } else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
4087 /* This is the post action for the jumping attribute (either red or green)
4088 * Use the stored jumping_attr to set the post act id on the jumping attribute
4090 attr->jumping_attr = jump_state->jumping_attr;
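/* Rough intuition for the helper above: jump_count is decremented once per
 * distinct tc action; when it reaches 1 the jump scope ends and the parser
 * splits off a new attribute after this action, and at 0 the first action
 * past the jump list is wired to the stored jumping_attr via a post-act id.
 */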
4095 parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
4096 struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
4097 struct mlx5e_tc_jump_state *jump_state,
4098 struct netlink_ext_ack *extack)
4100 struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
4101 u32 jump_count = jump_state->jump_count;
4104 if (!tc_act->get_branch_ctrl)
4105 return 0;
4107 tc_act->get_branch_ctrl(act, &cond_true, &cond_false);
4109 err = alloc_branch_attr(flow, &cond_true,
4110 &attr->branch_true, &jump_count, extack);
4111 if (err)
4112 goto out_err;
4114 if (jump_count)
4115 jump_state->jumping_attr = attr->branch_true;
4117 err = alloc_branch_attr(flow, &cond_false,
4118 &attr->branch_false, &jump_count, extack);
4119 if (err)
4120 goto err_branch_false;
4122 if (jump_count && !jump_state->jumping_attr)
4123 jump_state->jumping_attr = attr->branch_false;
4125 jump_state->jump_count = jump_count;
4127 return 0;
4128 err_branch_false:
4129 free_branch_attr(flow, attr->branch_true);
4130 out_err:
4131 return err;
4134 static int
4135 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
4136 struct flow_action *flow_action)
4138 struct netlink_ext_ack *extack = parse_state->extack;
4139 struct mlx5e_tc_flow_action flow_action_reorder;
4140 struct mlx5e_tc_flow *flow = parse_state->flow;
4141 struct mlx5e_tc_jump_state jump_state = {};
4142 struct mlx5_flow_attr *attr = flow->attr;
4143 enum mlx5_flow_namespace_type ns_type;
4144 struct mlx5e_priv *priv = flow->priv;
4145 struct flow_action_entry *act, **_act;
4146 struct mlx5e_tc_act *tc_act;
4149 flow_action_reorder.num_entries = flow_action->num_entries;
4150 flow_action_reorder.entries = kcalloc(flow_action->num_entries,
4151 sizeof(flow_action), GFP_KERNEL);
4152 if (!flow_action_reorder.entries)
4153 return -ENOMEM;
4155 mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
4157 ns_type = mlx5e_get_flow_namespace(flow);
4158 list_add(&attr->list, &flow->attrs);
4160 flow_action_for_each(i, _act, &flow_action_reorder) {
4161 jump_state.jump_target = false;
4162 act = *_act;
4163 tc_act = mlx5e_tc_act_get(act->id, ns_type);
4164 if (!tc_act) {
4165 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
4166 err = -EOPNOTSUPP;
4167 goto out_free;
4168 }
4170 if (!tc_act->can_offload(parse_state, act, i, attr)) {
4171 err = -EOPNOTSUPP;
4172 goto out_free;
4173 }
4175 err = tc_act->parse_action(parse_state, act, priv, attr);
4176 if (err)
4177 goto out_free;
4179 dec_jump_count(act, tc_act, attr, priv, &jump_state);
4181 err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
4182 if (err)
4183 goto out_free;
4185 parse_state->actions |= attr->action;
4187 /* Split attr for multi table act if not the last act. */
4188 if (jump_state.jump_target ||
4189 (tc_act->is_multi_table_act &&
4190 tc_act->is_multi_table_act(priv, act, attr) &&
4191 i < flow_action_reorder.num_entries - 1)) {
4192 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
4193 if (err)
4194 goto out_free;
4196 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
4197 if (!attr) {
4198 err = -ENOMEM;
4199 goto out_free;
4200 }
4202 list_add(&attr->list, &flow->attrs);
4206 kfree(flow_action_reorder.entries);
4208 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
4209 if (err)
4210 goto out_free_post_acts;
4212 err = alloc_flow_post_acts(flow, extack);
4213 if (err)
4214 goto out_free_post_acts;
4216 return 0;
4218 out_free:
4219 kfree(flow_action_reorder.entries);
4220 out_free_post_acts:
4221 free_flow_post_acts(flow);
4223 return err;
4226 static int
4227 flow_action_supported(struct flow_action *flow_action,
4228 struct netlink_ext_ack *extack)
4230 if (!flow_action_has_entries(flow_action)) {
4231 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
4235 if (!flow_action_hw_stats_check(flow_action, extack,
4236 FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
4237 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
4244 static int
4245 parse_tc_nic_actions(struct mlx5e_priv *priv,
4246 struct flow_action *flow_action,
4247 struct mlx5e_tc_flow *flow,
4248 struct netlink_ext_ack *extack)
4250 struct mlx5e_tc_act_parse_state *parse_state;
4251 struct mlx5e_tc_flow_parse_attr *parse_attr;
4252 struct mlx5_flow_attr *attr = flow->attr;
4255 err = flow_action_supported(flow_action, extack);
4256 if (err)
4257 return err;
4259 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
4260 parse_attr = attr->parse_attr;
4261 parse_state = &parse_attr->parse_state;
4262 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4263 parse_state->ct_priv = get_ct_priv(priv);
4265 err = parse_tc_actions(parse_state, flow_action);
4266 if (err)
4267 return err;
4269 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4270 if (err)
4271 return err;
4273 err = verify_attr_actions(attr->action, extack);
4274 if (err)
4275 return err;
4277 if (!actions_match_supported(priv, flow_action, parse_state->actions,
4278 parse_attr, flow, extack))
4279 return -EOPNOTSUPP;
4281 return 0;
4284 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
4285 struct net_device *peer_netdev)
4287 struct mlx5e_priv *peer_priv;
4289 peer_priv = netdev_priv(peer_netdev);
4291 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
4292 mlx5e_eswitch_vf_rep(priv->netdev) &&
4293 mlx5e_eswitch_vf_rep(peer_netdev) &&
4294 mlx5e_same_hw_devs(priv, peer_priv));
4297 static bool same_hw_reps(struct mlx5e_priv *priv,
4298 struct net_device *peer_netdev)
4300 struct mlx5e_priv *peer_priv;
4302 peer_priv = netdev_priv(peer_netdev);
4304 return mlx5e_eswitch_rep(priv->netdev) &&
4305 mlx5e_eswitch_rep(peer_netdev) &&
4306 mlx5e_same_hw_devs(priv, peer_priv);
4309 static bool is_lag_dev(struct mlx5e_priv *priv,
4310 struct net_device *peer_netdev)
4312 return ((mlx5_lag_is_sriov(priv->mdev) ||
4313 mlx5_lag_is_multipath(priv->mdev)) &&
4314 same_hw_reps(priv, peer_netdev));
4317 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
4319 if (same_hw_reps(priv, out_dev) &&
4320 MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
4321 MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
4322 return true;
4324 return false;
4327 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4328 struct net_device *out_dev)
4330 if (is_merged_eswitch_vfs(priv, out_dev))
4331 return true;
4333 if (is_multiport_eligible(priv, out_dev))
4334 return true;
4336 if (is_lag_dev(priv, out_dev))
4337 return true;
4339 return mlx5e_eswitch_rep(out_dev) &&
4340 same_port_devs(priv, netdev_priv(out_dev));
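/* Hedged summary of the helper below: forwarding to an internal port (e.g. an
 * OVS bridge device) is implemented by writing the port's metadata into
 * VPORT_TO_REG and sending the packet back to the root FDB table, where it is
 * re-matched as if it had arrived from that port.
 */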
4343 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
4344 struct mlx5_flow_attr *attr,
4345 int ifindex,
4346 enum mlx5e_tc_int_port_type type,
4347 u32 *action,
4348 int out_index)
4350 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4351 struct mlx5e_tc_int_port_priv *int_port_priv;
4352 struct mlx5e_tc_flow_parse_attr *parse_attr;
4353 struct mlx5e_tc_int_port *dest_int_port;
4356 parse_attr = attr->parse_attr;
4357 int_port_priv = mlx5e_get_int_port_priv(priv);
4359 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
4360 if (IS_ERR(dest_int_port))
4361 return PTR_ERR(dest_int_port);
4363 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
4364 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
4365 mlx5e_tc_int_port_get_metadata(dest_int_port));
4367 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
4371 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4373 esw_attr->dest_int_port = dest_int_port;
4374 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
4376 /* Forward to root fdb for matching against the new source vport */
4377 attr->dest_chain = 0;
static int
parse_tc_fdb_actions(struct mlx5e_priv *priv,
		     struct flow_action *flow_action,
		     struct mlx5e_tc_flow *flow,
		     struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_parse_state *parse_state;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *filter_dev;
	int err;

	err = flow_action_supported(flow_action, extack);
	if (err)
		return err;

	esw_attr = attr->esw_attr;
	parse_attr = attr->parse_attr;
	filter_dev = parse_attr->filter_dev;
	parse_state = &parse_attr->parse_state;
	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
	parse_state->ct_priv = get_ct_priv(priv);

	err = parse_tc_actions(parse_state, flow_action);
	if (err)
		return err;

	/* Forward to/from internal port can only have 1 dest */
	if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
	    esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rules with internal port can have only one destination");
		return -EOPNOTSUPP;
	}

	/* Forward from tunnel/internal port to internal port is not supported */
	if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
	    esw_attr->dest_int_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Forwarding from tunnel/internal port to internal port is not supported");
		return -EOPNOTSUPP;
	}

	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
	if (err)
		return err;

	if (!actions_match_supported(priv, flow_action, parse_state->actions,
				     parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);

	*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
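/* Flows are hashed by the unmodified TC filter cookie (flow->cookie), so
 * the replace/destroy/stats callbacks can find the offloaded flow directly
 * from the cookie carried in the flow_cls_offload request.
 */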
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_rep_priv *rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		rpriv = priv->ppriv;
		return &rpriv->tc_ht;
	} else /* NIC offload */
		return &tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
	     mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}
struct mlx5_flow_attr *
mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
{
	u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB)  ?
				sizeof(struct mlx5_esw_flow_attr) :
				sizeof(struct mlx5_nic_flow_attr);
	struct mlx5_flow_attr *attr;

	attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
	if (!attr)
		return attr;

	INIT_LIST_HEAD(&attr->list);
	return attr;
}
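/* The namespace-specific attribute (esw or nic) lives in the same
 * allocation, directly after struct mlx5_flow_attr, which is why the
 * single kzalloc of sizeof(*attr) + ex_attr_size above is sufficient.
 */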
static void
mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
	bool vf_tun;

	if (!attr)
		return;

	if (attr->post_act_handle)
		mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);

	clean_encap_dests(flow->priv, flow, attr, &vf_tun);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(counter_dev, attr->counter);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		if (attr->modify_hdr)
			mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
	}
}
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr;
	struct mlx5e_tc_flow *flow;
	int err = -ENOMEM;
	int out_index;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow)
		goto err_free;

	flow->flags = flow_flags;
	flow->cookie = f->cookie;
	flow->priv = priv;

	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
	if (!attr)
		goto err_free;

	flow->attr = attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	INIT_LIST_HEAD(&flow->attrs);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);
	init_completion(&flow->del_hw_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
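/* flow->refcnt pairs with mlx5e_flow_put(); init_done is completed once
 * the offload attempt finishes, so other threads that depend on the flow's
 * hardware state (e.g. encap/neigh update handling) can wait on it and
 * never observe a half-initialized rule.
 */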
static void
mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
		     struct mlx5e_tc_flow_parse_attr *parse_attr,
		     struct flow_cls_offload *f)
{
	attr->parse_attr = parse_attr;
	attr->chain = f->common.chain_index;
	attr->prio = f->common.prio;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	mlx5e_flow_attr_init(attr, parse_attr, f);

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	/* always set IP version for indirect table handling */
	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	if (flow->attr->lag.count) {
		err = mlx5_lag_add_mpesw_rule(esw->dev);
		if (err)
			goto err_free;
	}

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_lag;

		add_unready_flow(flow);
	}

	return flow;

err_lag:
	if (flow->attr->lag.count)
		mlx5_lag_del_mpesw_rule(esw->dev);
err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
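/* A -ENETUNREACH failure under multipath LAG is not fatal: the route may
 * come back on the other port, so the flow is parked on the unready list
 * and offloaded later by mlx5e_tc_reoffload_flows_work().
 */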
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is the mdev the packet originated from.
	 * So packets redirected to the uplink use the same mdev as the
	 * original flow, and packets redirected from the uplink use the
	 * peer mdev.
	 */
	if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;

	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
	 * function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}
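/* mlx5_esw_hold() blocks an eswitch mode change while the flow is being
 * inserted and is dropped via mlx5_esw_release() on both paths; the
 * longer-lived mlx5_esw_get() reference is held for the lifetime of the
 * offloaded flow and released by mlx5_esw_put() in mlx5e_delete_flower().
 */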
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/sec,
	 * where mbit means million bits.
	 * Moreover, if rate is non zero we choose to configure to a minimum of
	 * 1 mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
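/* Worked example (illustrative): rate = 1,500,000 bytes/sec gives
 * 12,000,000 bits/sec; adding 500,000 and dividing by 1,000,000 rounds to
 * the nearest unit, so rate_mbps = 12. A small non-zero rate such as
 * 20,000 bytes/sec (160,000 bits/sec) rounds down to 0 and is then
 * clamped to the 1 mbit/sec minimum by max_t().
 */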
static int
tc_matchall_police_validate(const struct flow_action *action,
			    const struct flow_action_entry *act,
			    struct netlink_ext_ack *extack)
{
	if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not continue");
		return -EOPNOTSUPP;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = tc_matchall_police_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&tc->hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}
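/* Example (illustrative): with MLX5E_TC_TABLE_MAX_GROUP_SIZE = 2^18 and
 * MLX5E_TC_TABLE_NUM_GROUPS = 4, a device exposing at least 2^18 flow
 * counters and log_max_ft_size = 20 gets min(4 * 2^18, 2^20) = 2^20
 * table entries.
 */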
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_table **ft = &tc->miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ft_attr.prio = 0;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);
	tc->priv = priv;

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = tc->miss_t;
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	mlx5e_hairpin_params_init(&tc->hairpin_params, dev);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	mlx5e_flow_put(priv, flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	debugfs_remove_recursive(tc->dfs_root);

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	return 0;
}

void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Two last values are reserved for stack devices slow path table mark
	 * and bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	struct mlx5e_tc_table *tc;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	tc = mlx5e_fs_get_tc(priv->fs);
	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;

		zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}
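/* reg_b layout used above: the low bits (MLX5E_TC_TABLE_CHAIN_TAG_MASK)
 * carry the chain mapping tag, while the zone restore id for connection
 * tracking sits at MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG),
 * masked by ESW_ZONE_ID_MASK.
 */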