/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
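/* Sizing for the NIC TC flow tables: the number of flow groups a table is
 * split into, and the maximum number of entries per group.
 */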
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
	struct dentry *dfs_root;

	/* tc action stats */
	struct mlx5e_tc_act_stats_handle *action_stats_handle;
};
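/* Describes how each enum mlx5e_tc_attr_to_reg value maps onto a hardware
 * metadata register: which register (mfield), the bit offset and length
 * within it (moffset/mlen), and where the corresponding match field lives
 * in the flow spec (soffset).
 */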
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
struct mlx5e_tc_jump_state {
	u32 jump_count;
	bool jump_target;
	struct mlx5_flow_attr *jumping_attr;

	enum flow_action_id last_id;
	u32 last_index;
};
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}
/* To avoid a false lock dependency warning, set the tc_ht lock class
 * different from the lock class of the ht being used: when deleting the
 * last flow from a group and then deleting the group, we get into
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash.
 * That takes ht->mutex, but it is a different ht->mutex than the one here.
 */
static struct lock_class_key tc_ht_lock_key;
static struct lock_class_key tc_ht_wq_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
					struct mlx5_flow_attr *attr);
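/* Merge a match on @data/@mask, at the register slice described by @type,
 * into the misc_parameters_2 section of @spec without disturbing bits that
 * are already matched there.
 */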
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	//move to correct offset
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	data <<= moffset;
	max_mask <<= moffset;

	//zero val and mask
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	//add current to mask
	curr_mask |= mask;
	curr_val |= data;

	//back to be32 and write
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
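/* Read back the value/mask currently matched at the register slice
 * described by @type in @spec; the inverse of mlx5e_tc_match_to_reg_match().
 */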
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
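/* Append a "set" modify-header action that writes @data into the register
 * slice described by @type. Returns the index of the new action within
 * @mod_hdr_acts (>= 0) on success, or a negative errno.
 */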
static int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
static struct mlx5e_tc_act_stats_handle *
get_act_stats_handle(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->action_stats_handle;
	}

	return tc->action_stats_handle;
}
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}
struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
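/* A flow attr carries a meter action either as an ASO execute action of
 * type flow meter, or via the MTU check flag.
 */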
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		 (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
		attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;
	enum mlx5e_post_meter_type type;

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   type,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}
static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}
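/* Insert a rule for @attr: NIC rules go to the NIC tables; switchdev rules
 * go through the sampler or flow meter objects first when the attr carries
 * a sample or meter action, and end up as FDB offloaded rules.
 */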
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(esw, attr);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
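/* A hairpin forwards packets from the RX pipeline of one function straight
 * into the TX pipeline of a peer function on the same device, without going
 * through software. With more than one channel, traffic is spread over the
 * hairpin queues with RSS via the indirect TIRs and a TTC table.
 */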
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	u8 log_num_packets;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}
int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_attr *attr)
{
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &attr->parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
	attr->mh = mh;

	return 0;
}

void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr)
{
	/* flow wasn't fully initialized */
	if (!attr->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     attr->mh);
	attr->mh = NULL;
}
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows that refer to it are
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to the
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}

	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;
	hp->log_num_packets = params->log_num_packets;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8
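/* Hairpin queues are selected per PCP priority: a flow must either match
 * the full 3-bit VLAN priority or not match on priority at all, in which
 * case the out-of-range UNKNOWN_MATCH_PRIO bucket is used.
 */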
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
	struct mlx5e_tc_table *tc = data;
	struct mlx5e_hairpin_entry *hpe;
	u32 cnt = 0;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		cnt++;
	mutex_unlock(&tc->hairpin_tbl_lock);

	*val = cnt;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
			 debugfs_hairpin_num_active_get, NULL, "%llu\n");

static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
	struct mlx5e_tc_table *tc = file->private;
	struct mlx5e_hairpin_entry *hpe;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		seq_printf(file,
			   "Hairpin peer_vhca_id %u prio %u refcnt %u num_channels %u num_packets %lu\n",
			   hpe->peer_vhca_id, hpe->prio,
			   refcount_read(&hpe->refcnt), hpe->hp->num_channels,
			   BIT(hpe->hp->log_num_packets));
	mutex_unlock(&tc->hairpin_tbl_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);
static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tc->dfs_root = debugfs_create_dir("tc", dfs_root);

	debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
			    &fops_hairpin_num_active);
	debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
			    &debugfs_hairpin_table_dump_fops);
}
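/* Look up (or create) the hairpin entry for the mirred peer and attach the
 * flow to it. The first flow requesting a given (vhca_id, prio) pair builds
 * the hairpin pair itself; concurrent flows wait on res_ready.
 */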
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct devlink *devlink = priv_to_devlink(priv->mdev);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	union devlink_param_value val = {};
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.log_num_packets = ilog2(val.vu32);
	params.log_data_size =
		clamp_t(u32,
			params.log_num_packets +
				MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
			MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
			MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));

	params.q_counter = priv->q_counter;
	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.num_channels = val.vu32;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
		if (err)
			return err;
	}

	flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
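/* Offload the flow to the slow path: forward everything to the slow table
 * so packets reach software, while (when the FW supports fwd+modify) a
 * modify-header action records the chain mapping in the metadata register
 * so handling can resume at the right chain after the miss.
 */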
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					MAPPED_OBJ_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->attr->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return ERR_PTR(err);
}
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->attr->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		struct mlx5_devcom *devcom;
		int err;

		/* In lag case we may get devices from different eswitch instances.
		 * If we failed to get vport num, it means, mostly, that we are on
		 * the wrong eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		rcu_read_lock();
		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
		rcu_read_unlock();

		return err;
	}

	return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
}
static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
		struct mlx5_flow_attr *attr,
		struct netlink_ext_ack *extack,
		bool *vf_tun)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *encap_dev = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int out_index;
	int err = 0;

	if (!mlx5e_is_eswitch_flow(flow))
		return 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto out;
		}
		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
					 extack, &encap_dev);
		dev_put(out_dev);
		if (err)
			goto out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (*vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}
static void
clean_encap_dests(struct mlx5e_priv *priv,
		  struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr;
	int out_index;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	esw_attr = attr->esw_attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mlx5e_detach_encap(priv, flow, attr, out_index);
		kfree(attr->parse_attr->tun_info[out_index]);
	}
}
static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return -EOPNOTSUPP;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int
post_process_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  struct netlink_ext_ack *extack)
{
	bool vf_tun;
	int err = 0;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		goto err_out;

	err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
		if (err)
			goto err_out;
	}

	if (attr->branch_true &&
	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
		if (err)
			goto err_out;
	}

	if (attr->branch_false &&
	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
		if (err)
			goto err_out;
	}

err_out:
	return err;
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = post_process_attr(flow, attr, extack);
	if (err)
		goto err_out;

	err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
	if (err)
		goto err_out;

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr_actions(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);

	free_flow_post_acts(flow);
	mlx5_free_flow_attr_actions(flow, attr);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
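/* Decide whether the flow's tunnel match must be rewritten into a register
 * match via the tunnel mapping: needed on chain 0 when an action (goto,
 * sample) can send the packet past the chain where the tunnel headers were
 * matched.
 */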
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
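/* Copy the dissector key @diss_key from @rule's match into @dst. */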
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
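/* Allocate a tunnel id for the flow's tunnel match (and, when the options
 * are not don't-care, a separate id for the tunnel options), then either
 * match on the packed id in the metadata register (chain > 0) or add a
 * modify-header action that writes it there (chain 0).
 */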
2191 static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
2192 struct mlx5e_tc_flow *flow,
2193 struct flow_cls_offload *f,
2194 struct net_device *filter_dev)
2196 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2197 struct netlink_ext_ack *extack = f->common.extack;
2198 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
2199 struct flow_match_enc_opts enc_opts_match;
2200 struct tunnel_match_enc_opts tun_enc_opts;
2201 struct mlx5_rep_uplink_priv *uplink_priv;
2202 struct mlx5_flow_attr *attr = flow->attr;
2203 struct mlx5e_rep_priv *uplink_rpriv;
2204 struct tunnel_match_key tunnel_key;
2205 bool enc_opts_is_dont_care = true;
2206 u32 tun_id, enc_opts_id = 0;
2207 struct mlx5_eswitch *esw;
2211 esw = priv->mdev->priv.eswitch;
2212 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2213 uplink_priv = &uplink_rpriv->uplink_priv;
2215 memset(&tunnel_key, 0, sizeof(tunnel_key));
2216 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2217 &tunnel_key.enc_control);
2218 if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
2219 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
2220 &tunnel_key.enc_ipv4);
2222 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
2223 &tunnel_key.enc_ipv6);
2224 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
2225 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
2226 &tunnel_key.enc_tp);
2227 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
2228 &tunnel_key.enc_key_id);
2229 tunnel_key.filter_ifindex = filter_dev->ifindex;
2231 err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
2235 flow_rule_match_enc_opts(rule, &enc_opts_match);
2236 err = enc_opts_is_dont_care_or_full_match(priv,
2237 enc_opts_match.mask,
2239 &enc_opts_is_dont_care);
2243 if (!enc_opts_is_dont_care) {
2244 memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
2245 memcpy(&tun_enc_opts.key, enc_opts_match.key,
2246 sizeof(*enc_opts_match.key));
2247 memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
2248 sizeof(*enc_opts_match.mask));
2250 err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
2251 &tun_enc_opts, &enc_opts_id);
2256 value = tun_id << ENC_OPTS_BITS | enc_opts_id;
2257 mask = enc_opts_id ? TUNNEL_ID_MASK :
2258 (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
2260 if (attr->chain) {
2261 mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
2262 TUNNEL_TO_REG, value, mask);
2263 } else {
2264 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
2265 err = mlx5e_tc_match_to_reg_set(priv->mdev,
2266 mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
2267 TUNNEL_TO_REG, value);
2268 if (err)
2269 goto err_set;
2271 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2272 }
2274 flow->attr->tunnel_id = value;
2275 return 0;
2277 err_set:
2278 if (enc_opts_id)
2279 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2280 enc_opts_id);
2281 err_enc_opts:
2282 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2283 return err;
2284 }
2286 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
2288 u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
2289 u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
2290 struct mlx5_rep_uplink_priv *uplink_priv;
2291 struct mlx5e_rep_priv *uplink_rpriv;
2292 struct mlx5_eswitch *esw;
2294 esw = flow->priv->mdev->priv.eswitch;
2295 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2296 uplink_priv = &uplink_rpriv->uplink_priv;
2299 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2300 if (enc_opts_id)
2301 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2302 enc_opts_id);
2303 }
2305 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
2306 struct flow_match_basic *match, bool outer,
2307 void *headers_c, void *headers_v)
2309 bool ip_version_cap;
2311 ip_version_cap = outer ?
2312 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2313 ft_field_support.outer_ip_version) :
2314 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2315 ft_field_support.inner_ip_version);
2317 if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
2318 (match->key->n_proto == htons(ETH_P_IP) ||
2319 match->key->n_proto == htons(ETH_P_IPV6))) {
2320 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
2321 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
2322 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
2323 } else {
2324 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
2325 ntohs(match->mask->n_proto));
2326 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
2327 ntohs(match->key->n_proto));
2328 }
2329 }
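/* In short: when the device parses ip_version, a fully-masked IPv4/IPv6
 * ethertype match is narrowed to ip_version 4/6; otherwise the raw 16-bit
 * ethertype is programmed into the headers match.
 */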
2331 u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
2332 {
2333 void *headers_v;
2334 u16 ethertype;
2335 u8 ip_version;
2337 if (outer)
2338 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2339 else
2340 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2342 ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
2343 /* If ip_version was not set, derive it from the ethertype instead */
2344 if (!ip_version) {
2345 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2346 if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
2347 ip_version = 4;
2348 else if (ethertype == ETH_P_IPV6)
2349 ip_version = 6;
2350 }
2351 return ip_version;
2352 }
2354 /* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
2355 * And changes inner ip_ecn depending on inner and outer ip_ecn as follows:
2356 * +---------+----------------------------------------+
2357 * |Arriving | Arriving Outer Header                  |
2358 * |   Inner +---------+---------+---------+----------+
2359 * |  Header | Not-ECT | ECT(0)  | ECT(1)  |    CE    |
2360 * +---------+---------+---------+---------+----------+
2361 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT |  <drop>  |
2362 * |  ECT(0) |  ECT(0) |  ECT(0) |  ECT(1) |   CE*    |
2363 * |  ECT(1) |  ECT(1) |  ECT(1) | ECT(1)* |   CE*    |
2364 * |    CE   |    CE   |    CE   |    CE   |    CE    |
2365 * +---------+---------+---------+---------+----------+
2367 * Tc matches on inner after decapsulation on tunnel device, but hw offload matches
2368 * the inner ip_ecn value before hardware decap action.
2370 * Cells marked with a star (*) are changed from the original inner packet ip_ecn value during decap, and
2371 * so matching those values on inner ip_ecn before decap will fail.
2373 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
2374 * except for the outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
2375 * and as such we can drop the inner ip_ecn=CE match.
2376 */
2378 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
2379 struct flow_cls_offload *f,
2380 bool *match_inner_ecn)
2382 u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
2383 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2384 struct netlink_ext_ack *extack = f->common.extack;
2385 struct flow_match_ip match;
2387 *match_inner_ecn = true;
2389 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
2390 flow_rule_match_enc_ip(rule, &match);
2391 outer_ecn_key = match.key->tos & INET_ECN_MASK;
2392 outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
2395 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2396 flow_rule_match_ip(rule, &match);
2397 inner_ecn_key = match.key->tos & INET_ECN_MASK;
2398 inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
2401 if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
2402 NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
2403 netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
2407 if (!outer_ecn_mask) {
2408 if (!inner_ecn_mask)
2409 return 0;
2411 NL_SET_ERR_MSG_MOD(extack,
2412 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2413 netdev_warn(priv->netdev,
2414 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2418 if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
2419 NL_SET_ERR_MSG_MOD(extack,
2420 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2421 netdev_warn(priv->netdev,
2422 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2426 if (!inner_ecn_mask)
2427 return 0;
2429 /* Both inner and outer have full mask on ecn */
2431 if (outer_ecn_key == INET_ECN_ECT_1) {
2432 /* inner ecn might change by DECAP action */
2434 NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
2435 netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
2439 if (outer_ecn_key != INET_ECN_CE)
2440 return 0;
2442 if (inner_ecn_key != INET_ECN_CE) {
2443 /* Can't happen in software, as packet ecn will be changed to CE after decap */
2444 NL_SET_ERR_MSG_MOD(extack,
2445 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2446 netdev_warn(priv->netdev,
2447 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2451 /* outer ecn = CE, inner ecn = CE, as decap will change inner ecn to CE in any case,
2452 * drop match on inner ecn
2453 */
2454 *match_inner_ecn = false;
2456 return 0;
2457 }
2459 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2460 struct mlx5e_tc_flow *flow,
2461 struct mlx5_flow_spec *spec,
2462 struct flow_cls_offload *f,
2463 struct net_device *filter_dev,
2464 u8 *match_level,
2465 bool *match_inner)
2466 {
2467 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
2468 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2469 struct netlink_ext_ack *extack = f->common.extack;
2470 bool needs_mapping, sets_mapping;
2471 int err;
2473 if (!mlx5e_is_eswitch_flow(flow)) {
2474 NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
2478 needs_mapping = !!flow->attr->chain;
2479 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2480 *match_inner = !needs_mapping;
2482 if ((needs_mapping || sets_mapping) &&
2483 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2484 NL_SET_ERR_MSG_MOD(extack,
2485 "Chains on tunnel devices isn't supported without register loopback support");
2486 netdev_warn(priv->netdev,
2487 "Chains on tunnel devices isn't supported without register loopback support");
2491 if (!flow->attr->chain) {
2492 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2493 match_level);
2494 if (err) {
2495 NL_SET_ERR_MSG_MOD(extack,
2496 "Failed to parse tunnel attributes");
2497 netdev_warn(priv->netdev,
2498 "Failed to parse tunnel attributes");
2502 /* With mpls over udp we decapsulate using packet reformat
2503 * instead of setting the mac address
2504 */
2505 if (!netif_is_bareudp(filter_dev))
2506 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2507 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2508 if (err)
2509 return err;
2510 } else if (tunnel) {
2511 struct mlx5_flow_spec *tmp_spec;
2513 tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
2514 if (!tmp_spec) {
2515 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
2516 netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
2517 return -ENOMEM;
2518 }
2519 memcpy(tmp_spec, spec, sizeof(*tmp_spec));
2521 err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
2522 if (err) {
2523 kvfree(tmp_spec);
2524 NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
2525 netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
2526 return err;
2527 }
2528 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2529 kvfree(tmp_spec);
2530 if (err)
2531 return err;
2532 }
2534 if (!needs_mapping && !sets_mapping)
2535 return 0;
2537 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2538 }
2540 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2541 {
2542 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2543 inner_headers);
2544 }
2546 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2547 {
2548 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2549 inner_headers);
2550 }
2552 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2553 {
2554 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2555 outer_headers);
2556 }
2558 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2559 {
2560 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2561 outer_headers);
2562 }
2564 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
2565 {
2566 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2567 get_match_inner_headers_value(spec) :
2568 get_match_outer_headers_value(spec);
2569 }
2571 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
2572 {
2573 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2574 get_match_inner_headers_criteria(spec) :
2575 get_match_outer_headers_criteria(spec);
2576 }
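/* The DECAP flag decides which header set a flow's matches refer to: after
 * decapsulation the packet seen by match/actions is the inner one, so both
 * criteria and value pointers must come from inner_headers; otherwise the
 * outer (on-wire) headers are used.
 */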
2578 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2579 struct flow_cls_offload *f)
2581 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2582 struct netlink_ext_ack *extack = f->common.extack;
2583 struct net_device *ingress_dev;
2584 struct flow_match_meta match;
2586 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2587 return 0;
2589 flow_rule_match_meta(rule, &match);
2590 if (!match.mask->ingress_ifindex)
2591 return 0;
2593 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2594 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2598 ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2599 match.key->ingress_ifindex);
2600 if (!ingress_dev) {
2601 NL_SET_ERR_MSG_MOD(extack,
2602 "Can't find the ingress port to match on");
2606 if (ingress_dev != filter_dev) {
2607 NL_SET_ERR_MSG_MOD(extack,
2608 "Can't match on the ingress filter port");
2615 static bool skip_key_basic(struct net_device *filter_dev,
2616 struct flow_cls_offload *f)
2618 /* When doing mpls over udp decap, the user needs to provide
2619 * MPLS_UC as the protocol in order to be able to match on mpls
2620 * label fields. However, the actual ethertype is IP so we want to
2621 * avoid matching on this, otherwise we'll fail the match.
2622 */
2623 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2624 return true;
2626 return false;
2627 }
2629 static int __parse_cls_flower(struct mlx5e_priv *priv,
2630 struct mlx5e_tc_flow *flow,
2631 struct mlx5_flow_spec *spec,
2632 struct flow_cls_offload *f,
2633 struct net_device *filter_dev,
2634 u8 *inner_match_level, u8 *outer_match_level)
2636 struct netlink_ext_ack *extack = f->common.extack;
2637 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2638 outer_headers);
2639 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2640 outer_headers);
2641 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2642 misc_parameters);
2643 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2644 misc_parameters);
2645 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2646 misc_parameters_3);
2647 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2648 misc_parameters_3);
2649 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2650 struct flow_dissector *dissector = rule->match.dissector;
2651 enum fs_flow_table_type fs_type;
2652 bool match_inner_ecn = true;
2653 u16 addr_type = 0;
2654 u8 ip_proto = 0;
2655 u8 *match_level;
2656 int err;
2658 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2659 match_level = outer_match_level;
2661 if (dissector->used_keys &
2662 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2663 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2664 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2665 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2666 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2667 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2668 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2669 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2670 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2671 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2672 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2673 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2674 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2675 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2676 BIT(FLOW_DISSECTOR_KEY_TCP) |
2677 BIT(FLOW_DISSECTOR_KEY_IP) |
2678 BIT(FLOW_DISSECTOR_KEY_CT) |
2679 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2680 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2681 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2682 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2683 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2684 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2685 dissector->used_keys);
2686 return -EOPNOTSUPP;
2687 }
2689 if (mlx5e_get_tc_tun(filter_dev)) {
2690 bool match_inner = false;
2692 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2693 outer_match_level, &match_inner);
2694 if (err)
2695 return err;
2697 if (match_inner) {
2698 /* header pointers should point to the inner headers
2699 * if the packet was decapsulated already.
2700 * outer headers are set by parse_tunnel_attr.
2702 match_level = inner_match_level;
2703 headers_c = get_match_inner_headers_criteria(spec);
2704 headers_v = get_match_inner_headers_value(spec);
2705 }
2707 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2708 if (err)
2709 return err;
2710 }
2712 err = mlx5e_flower_parse_meta(filter_dev, f);
2713 if (err)
2714 return err;
2716 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2717 !skip_key_basic(filter_dev, f)) {
2718 struct flow_match_basic match;
2720 flow_rule_match_basic(rule, &match);
2721 mlx5e_tc_set_ethertype(priv->mdev, &match,
2722 match_level == outer_match_level,
2723 headers_c, headers_v);
2725 if (match.mask->n_proto)
2726 *match_level = MLX5_MATCH_L2;
2728 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2729 is_vlan_dev(filter_dev)) {
2730 struct flow_dissector_key_vlan filter_dev_mask;
2731 struct flow_dissector_key_vlan filter_dev_key;
2732 struct flow_match_vlan match;
2734 if (is_vlan_dev(filter_dev)) {
2735 match.key = &filter_dev_key;
2736 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2737 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2738 match.key->vlan_priority = 0;
2739 match.mask = &filter_dev_mask;
2740 memset(match.mask, 0xff, sizeof(*match.mask));
2741 match.mask->vlan_priority = 0;
2742 } else {
2743 flow_rule_match_vlan(rule, &match);
2744 }
2745 if (match.mask->vlan_id ||
2746 match.mask->vlan_priority ||
2747 match.mask->vlan_tpid) {
2748 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2749 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2750 svlan_tag, 1);
2751 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2752 svlan_tag, 1);
2753 } else {
2754 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2755 cvlan_tag, 1);
2756 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2757 cvlan_tag, 1);
2758 }
2760 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2761 match.mask->vlan_id);
2762 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2763 match.key->vlan_id);
2765 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2766 match.mask->vlan_priority);
2767 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2768 match.key->vlan_priority);
2770 *match_level = MLX5_MATCH_L2;
2772 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2773 match.mask->vlan_eth_type &&
2774 MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2775 ft_field_support.outer_second_vid,
2776 fs_type)) {
2777 MLX5_SET(fte_match_set_misc, misc_c,
2778 outer_second_cvlan_tag, 1);
2779 spec->match_criteria_enable |=
2780 MLX5_MATCH_MISC_PARAMETERS;
2781 }
2782 }
2783 } else if (*match_level != MLX5_MATCH_NONE) {
2784 /* cvlan_tag enabled in match criteria and
2785 * disabled in match value means both S & C tags
2786 * don't exist (untagged of both)
2788 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2789 *match_level = MLX5_MATCH_L2;
2792 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2793 struct flow_match_vlan match;
2795 flow_rule_match_cvlan(rule, &match);
2796 if (match.mask->vlan_id ||
2797 match.mask->vlan_priority ||
2798 match.mask->vlan_tpid) {
2799 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2800 fs_type)) {
2801 NL_SET_ERR_MSG_MOD(extack,
2802 "Matching on CVLAN is not supported");
2803 return -EOPNOTSUPP;
2804 }
2806 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2807 MLX5_SET(fte_match_set_misc, misc_c,
2808 outer_second_svlan_tag, 1);
2809 MLX5_SET(fte_match_set_misc, misc_v,
2810 outer_second_svlan_tag, 1);
2811 } else {
2812 MLX5_SET(fte_match_set_misc, misc_c,
2813 outer_second_cvlan_tag, 1);
2814 MLX5_SET(fte_match_set_misc, misc_v,
2815 outer_second_cvlan_tag, 1);
2816 }
2818 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2819 match.mask->vlan_id);
2820 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2821 match.key->vlan_id);
2822 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2823 match.mask->vlan_priority);
2824 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2825 match.key->vlan_priority);
2827 *match_level = MLX5_MATCH_L2;
2828 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2832 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2833 struct flow_match_eth_addrs match;
2835 flow_rule_match_eth_addrs(rule, &match);
2836 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2837 dmac_47_16),
2838 match.mask->dst);
2839 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2840 dmac_47_16),
2841 match.key->dst);
2843 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2844 smac_47_16),
2845 match.mask->src);
2846 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2847 smac_47_16),
2848 match.key->src);
2850 if (!is_zero_ether_addr(match.mask->src) ||
2851 !is_zero_ether_addr(match.mask->dst))
2852 *match_level = MLX5_MATCH_L2;
2855 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2856 struct flow_match_control match;
2858 flow_rule_match_control(rule, &match);
2859 addr_type = match.key->addr_type;
2861 /* the HW doesn't support frag first/later */
2862 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2863 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2867 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2868 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2869 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2870 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2872 /* the HW doesn't need L3 inline to match on frag=no */
2873 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2874 *match_level = MLX5_MATCH_L2;
2875 /* *** L2 attributes parsing up to here *** */
2877 *match_level = MLX5_MATCH_L3;
2881 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2882 struct flow_match_basic match;
2884 flow_rule_match_basic(rule, &match);
2885 ip_proto = match.key->ip_proto;
2887 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2888 match.mask->ip_proto);
2889 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2890 match.key->ip_proto);
2892 if (match.mask->ip_proto)
2893 *match_level = MLX5_MATCH_L3;
2896 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2897 struct flow_match_ipv4_addrs match;
2899 flow_rule_match_ipv4_addrs(rule, &match);
2900 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2901 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2902 &match.mask->src, sizeof(match.mask->src));
2903 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2904 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2905 &match.key->src, sizeof(match.key->src));
2906 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2907 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2908 &match.mask->dst, sizeof(match.mask->dst));
2909 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2910 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2911 &match.key->dst, sizeof(match.key->dst));
2913 if (match.mask->src || match.mask->dst)
2914 *match_level = MLX5_MATCH_L3;
2917 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2918 struct flow_match_ipv6_addrs match;
2920 flow_rule_match_ipv6_addrs(rule, &match);
2921 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2922 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2923 &match.mask->src, sizeof(match.mask->src));
2924 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2925 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2926 &match.key->src, sizeof(match.key->src));
2928 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2929 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2930 &match.mask->dst, sizeof(match.mask->dst));
2931 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2932 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2933 &match.key->dst, sizeof(match.key->dst));
2935 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2936 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2937 *match_level = MLX5_MATCH_L3;
2940 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2941 struct flow_match_ip match;
2943 flow_rule_match_ip(rule, &match);
2944 if (match_inner_ecn) {
2945 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2946 match.mask->tos & 0x3);
2947 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2948 match.key->tos & 0x3);
2951 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2952 match.mask->tos >> 2);
2953 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2954 match.key->tos >> 2);
2956 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2957 match.mask->ttl);
2958 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2959 match.key->ttl);
2961 if (match.mask->ttl &&
2962 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2963 ft_field_support.outer_ipv4_ttl)) {
2964 NL_SET_ERR_MSG_MOD(extack,
2965 "Matching on TTL is not supported");
2969 if (match.mask->tos || match.mask->ttl)
2970 *match_level = MLX5_MATCH_L3;
2973 /* *** L3 attributes parsing up to here *** */
2975 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2976 struct flow_match_ports match;
2978 flow_rule_match_ports(rule, &match);
2979 switch (ip_proto) {
2980 case IPPROTO_TCP:
2981 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2982 tcp_sport, ntohs(match.mask->src));
2983 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2984 tcp_sport, ntohs(match.key->src));
2986 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2987 tcp_dport, ntohs(match.mask->dst));
2988 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2989 tcp_dport, ntohs(match.key->dst));
2990 break;
2992 case IPPROTO_UDP:
2993 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2994 udp_sport, ntohs(match.mask->src));
2995 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2996 udp_sport, ntohs(match.key->src));
2998 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2999 udp_dport, ntohs(match.mask->dst));
3000 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
3001 udp_dport, ntohs(match.key->dst));
3002 break;
3003 default:
3004 NL_SET_ERR_MSG_MOD(extack,
3005 "Only UDP and TCP transports are supported for L4 matching");
3006 netdev_err(priv->netdev,
3007 "Only UDP and TCP transports are supported\n");
3008 return -EOPNOTSUPP;
3009 }
3011 if (match.mask->src || match.mask->dst)
3012 *match_level = MLX5_MATCH_L4;
3015 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
3016 struct flow_match_tcp match;
3018 flow_rule_match_tcp(rule, &match);
3019 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
3020 ntohs(match.mask->flags));
3021 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3022 ntohs(match.key->flags));
3024 if (match.mask->flags)
3025 *match_level = MLX5_MATCH_L4;
3027 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
3028 struct flow_match_icmp match;
3030 flow_rule_match_icmp(rule, &match);
3031 switch (ip_proto) {
3032 case IPPROTO_ICMP:
3033 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
3034 MLX5_FLEX_PROTO_ICMP)) {
3035 NL_SET_ERR_MSG_MOD(extack,
3036 "Match on Flex protocols for ICMP is not supported");
3037 return -EOPNOTSUPP;
3038 }
3039 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
3040 match.mask->type);
3041 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
3042 match.key->type);
3043 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
3044 match.mask->code);
3045 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
3046 match.key->code);
3047 break;
3048 case IPPROTO_ICMPV6:
3049 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
3050 MLX5_FLEX_PROTO_ICMPV6)) {
3051 NL_SET_ERR_MSG_MOD(extack,
3052 "Match on Flex protocols for ICMPV6 is not supported");
3053 return -EOPNOTSUPP;
3054 }
3055 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
3056 match.mask->type);
3057 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
3058 match.key->type);
3059 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
3060 match.mask->code);
3061 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
3062 match.key->code);
3063 break;
3064 default:
3065 NL_SET_ERR_MSG_MOD(extack,
3066 "Code and type matching only with ICMP and ICMPv6");
3067 netdev_err(priv->netdev,
3068 "Code and type matching only with ICMP and ICMPv6\n");
3069 return -EOPNOTSUPP;
3070 }
3071 if (match.mask->code || match.mask->type) {
3072 *match_level = MLX5_MATCH_L4;
3073 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
3074 }
3075 }
3076 /* Currently supported only for MPLS over UDP */
3077 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
3078 !netif_is_bareudp(filter_dev)) {
3079 NL_SET_ERR_MSG_MOD(extack,
3080 "Matching on MPLS is supported only for MPLS over UDP");
3081 netdev_err(priv->netdev,
3082 "Matching on MPLS is supported only for MPLS over UDP\n");
3089 static int parse_cls_flower(struct mlx5e_priv *priv,
3090 struct mlx5e_tc_flow *flow,
3091 struct mlx5_flow_spec *spec,
3092 struct flow_cls_offload *f,
3093 struct net_device *filter_dev)
3095 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
3096 struct netlink_ext_ack *extack = f->common.extack;
3097 struct mlx5_core_dev *dev = priv->mdev;
3098 struct mlx5_eswitch *esw = dev->priv.eswitch;
3099 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3100 struct mlx5_eswitch_rep *rep;
3101 bool is_eswitch_flow;
3104 inner_match_level = MLX5_MATCH_NONE;
3105 outer_match_level = MLX5_MATCH_NONE;
3107 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
3108 &inner_match_level, &outer_match_level);
3109 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
3110 outer_match_level : inner_match_level;
3112 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
3113 if (!err && is_eswitch_flow) {
3114 rep = rpriv->rep;
3115 if (rep->vport != MLX5_VPORT_UPLINK &&
3116 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
3117 esw->offloads.inline_mode < non_tunnel_match_level)) {
3118 NL_SET_ERR_MSG_MOD(extack,
3119 "Flow is not offloaded due to min inline setting");
3120 netdev_warn(priv->netdev,
3121 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
3122 non_tunnel_match_level, esw->offloads.inline_mode);
3123 return -EOPNOTSUPP;
3124 }
3125 }
3127 flow->attr->inner_match_level = inner_match_level;
3128 flow->attr->outer_match_level = outer_match_level;
3130 return err;
3131 }
3134 struct mlx5_fields {
3135 u8 field;
3136 u8 field_bsize;
3137 u32 field_mask;
3138 u32 offset;
3139 u32 match_offset;
3140 };
3142 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
3143 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
3144 offsetof(struct pedit_headers, field) + (off), \
3145 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
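/* Each OFFLOAD() entry below ties one pedit-able field to its firmware
 * rewrite field and to the offset of the matching bytes in the FTE, e.g.:
 *   OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16)
 * reads as: 32 bits at the pedit offset of eth.h_source are written via
 * MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 and compared against smac_47_16.
 */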
3147 /* masked values are the same and there are no rewrites that do not have a
3148 * match.
3149 */
3150 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
3151 type matchmaskx = *(type *)(matchmaskp); \
3152 type matchvalx = *(type *)(matchvalp); \
3153 type maskx = *(type *)(maskp); \
3154 type valx = *(type *)(valp); \
3156 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
3157 matchmaskx)); \
3158 })
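/* In words: a rewrite is redundant when the masked new value equals the
 * masked matched value, and every rewritten bit is also covered by the
 * match mask (no bit set in maskx that isn't set in matchmaskx).
 */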
3160 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
3161 void *matchmaskp, u8 bsize)
3162 {
3163 bool same = false;
3165 switch (bsize) {
3166 case 8:
3167 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
3168 break;
3169 case 16:
3170 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
3171 break;
3172 case 32:
3173 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
3174 break;
3175 }
3177 return same;
3178 }
3180 static struct mlx5_fields fields[] = {
3181 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
3182 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
3183 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
3184 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
3185 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
3186 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
3188 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
3189 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
3190 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
3191 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3193 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
3194 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
3195 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
3196 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
3197 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
3198 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
3199 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
3200 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
3201 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
3202 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
3203 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
3204 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
3205 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
3206 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
3207 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
3208 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
3209 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
3210 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
3212 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
3213 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
3214 /* in linux iphdr tcp_flags is 8 bits long */
3215 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
3217 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
3218 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
3221 static unsigned long mask_to_le(unsigned long mask, int size)
3222 {
3223 __be32 mask_be32;
3224 __be16 mask_be16;
3226 if (size == 32) {
3227 mask_be32 = (__force __be32)(mask);
3228 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
3229 } else if (size == 16) {
3230 mask_be32 = (__force __be32)(mask);
3231 mask_be16 = *(__be16 *)&mask_be32;
3232 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
3233 }
3235 return mask;
3236 }
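/* The pedit masks are stored in network (big-endian) byte order, while
 * find_first_bit()/find_last_bit() below walk a CPU-endian long; converting
 * via cpu_to_le*() gives a stable LSB-first bit numbering on both little-
 * and big-endian hosts.
 */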
3238 static int offload_pedit_fields(struct mlx5e_priv *priv,
3240 struct mlx5e_tc_flow_parse_attr *parse_attr,
3242 struct netlink_ext_ack *extack)
3244 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
3245 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3246 void *headers_c, *headers_v, *action, *vals_p;
3247 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
3248 struct mlx5e_tc_mod_hdr_acts *mod_acts;
3249 unsigned long mask, field_mask;
3250 int i, first, last, next_z;
3251 struct mlx5_fields *f;
3254 mod_acts = &parse_attr->mod_hdr_acts;
3255 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
3256 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
3258 set_masks = &hdrs[0].masks;
3259 add_masks = &hdrs[1].masks;
3260 set_vals = &hdrs[0].vals;
3261 add_vals = &hdrs[1].vals;
3263 for (i = 0; i < ARRAY_SIZE(fields); i++) {
3264 bool skip;
3266 f = &fields[i];
3267 /* avoid seeing bits set from previous iterations */
3268 s_mask = 0;
3269 a_mask = 0;
3271 s_masks_p = (void *)set_masks + f->offset;
3272 a_masks_p = (void *)add_masks + f->offset;
3274 s_mask = *s_masks_p & f->field_mask;
3275 a_mask = *a_masks_p & f->field_mask;
3277 if (!s_mask && !a_mask) /* nothing to offload here */
3280 if (s_mask && a_mask) {
3281 NL_SET_ERR_MSG_MOD(extack,
3282 "can't set and add to the same HW field");
3283 netdev_warn(priv->netdev,
3284 "mlx5: can't set and add to the same HW field (%x)\n",
3291 void *match_mask = headers_c + f->match_offset;
3292 void *match_val = headers_v + f->match_offset;
3294 cmd = MLX5_ACTION_TYPE_SET;
3296 vals_p = (void *)set_vals + f->offset;
3297 /* don't rewrite if we have a match on the same value */
3298 if (cmp_val_mask(vals_p, s_masks_p, match_val,
3299 match_mask, f->field_bsize))
3301 /* clear to denote we consumed this field */
3302 *s_masks_p &= ~f->field_mask;
3304 cmd = MLX5_ACTION_TYPE_ADD;
3306 vals_p = (void *)add_vals + f->offset;
3307 /* add 0 is no change */
3308 if ((*(u32 *)vals_p & f->field_mask) == 0)
3310 /* clear to denote we consumed this field */
3311 *a_masks_p &= ~f->field_mask;
3316 mask = mask_to_le(mask, f->field_bsize);
3318 first = find_first_bit(&mask, f->field_bsize);
3319 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
3320 last = find_last_bit(&mask, f->field_bsize);
3321 if (first < next_z && next_z < last) {
3322 NL_SET_ERR_MSG_MOD(extack,
3323 "rewrite of few sub-fields isn't supported");
3324 netdev_warn(priv->netdev,
3325 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
3326 mask);
3327 return -EOPNOTSUPP;
3328 }
3330 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
3331 if (IS_ERR(action)) {
3332 NL_SET_ERR_MSG_MOD(extack,
3333 "too many pedit actions, can't offload");
3334 mlx5_core_warn(priv->mdev,
3335 "mlx5: parsed %d pedit actions, can't do more\n",
3336 mod_acts->num_actions);
3337 return PTR_ERR(action);
3340 MLX5_SET(set_action_in, action, action_type, cmd);
3341 MLX5_SET(set_action_in, action, field, f->field);
3343 if (cmd == MLX5_ACTION_TYPE_SET) {
3344 int start;
3346 field_mask = mask_to_le(f->field_mask, f->field_bsize);
3348 /* if field is bit sized it can start not from first bit */
3349 start = find_first_bit(&field_mask, f->field_bsize);
3351 MLX5_SET(set_action_in, action, offset, first - start);
3352 /* length is num of bits to be written, zero means length of 32 */
3353 MLX5_SET(set_action_in, action, length, (last - first + 1));
3354 }
3356 if (f->field_bsize == 32)
3357 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
3358 else if (f->field_bsize == 16)
3359 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
3360 else if (f->field_bsize == 8)
3361 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
3363 ++mod_acts->num_actions;
3364 }
3366 return 0;
3367 }
3369 static const struct pedit_headers zero_masks = {};
3371 static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
3372 struct mlx5e_tc_flow_parse_attr *parse_attr,
3373 struct netlink_ext_ack *extack)
3375 struct pedit_headers *cmd_masks;
3376 int cmd;
3378 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3379 cmd_masks = &parse_attr->hdrs[cmd].masks;
3380 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3381 NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
3382 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3383 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3384 16, 1, cmd_masks, sizeof(zero_masks), true);
3385 return -EOPNOTSUPP;
3386 }
3387 }
3389 return 0;
3390 }
3392 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3393 struct mlx5e_tc_flow_parse_attr *parse_attr,
3394 u32 *action_flags,
3395 struct netlink_ext_ack *extack)
3396 {
3397 int err;
3399 err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
3400 if (err < 0)
3401 goto out_dealloc_parsed_actions;
3403 err = verify_offload_pedit_fields(priv, parse_attr, extack);
3404 if (err)
3405 goto out_dealloc_parsed_actions;
3407 return 0;
3409 out_dealloc_parsed_actions:
3410 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3411 return err;
3412 }
3414 struct ip_ttl_word {
3415 __u8 ttl;
3416 __u8 protocol;
3417 __sum16 check;
3418 };
3420 struct ipv6_hoplimit_word {
3421 __be16 payload_len;
3422 __u8 nexthdr;
3423 __u8 hop_limit;
3424 };
3426 static bool
3427 is_flow_action_modify_ip_header(struct flow_action *flow_action)
3429 const struct flow_action_entry *act;
3430 u32 mask, offset;
3431 u8 htype;
3432 int i;
3434 /* For IPv4 & IPv6 header check 4 byte word,
3435 * to determine that modified fields
3436 * are NOT ttl & hop_limit only.
3437 */
3438 flow_action_for_each(i, act, flow_action) {
3439 if (act->id != FLOW_ACTION_MANGLE &&
3440 act->id != FLOW_ACTION_ADD)
3441 continue;
3443 htype = act->mangle.htype;
3444 offset = act->mangle.offset;
3445 mask = ~act->mangle.mask;
3447 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3448 struct ip_ttl_word *ttl_word =
3449 (struct ip_ttl_word *)&mask;
3451 if (offset != offsetof(struct iphdr, ttl) ||
3452 ttl_word->protocol ||
3453 ttl_word->check)
3454 return true;
3455 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3456 struct ipv6_hoplimit_word *hoplimit_word =
3457 (struct ipv6_hoplimit_word *)&mask;
3459 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3460 hoplimit_word->payload_len ||
3461 hoplimit_word->nexthdr)
3462 return true;
3463 }
3464 }
3466 return false;
3467 }
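/* Note on the word casts above: pedit mangles whole 32-bit words, so a
 * "TTL/hop_limit only" rewrite appears as a word-sized op whose inverted
 * mask leaves the neighbouring fields (protocol/check or
 * payload_len/nexthdr) untouched; any other modified bits mean more of
 * the IP header is being rewritten.
 */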
3469 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3470 struct mlx5_flow_spec *spec,
3471 struct flow_action *flow_action,
3473 struct netlink_ext_ack *extack)
3475 bool modify_ip_header;
3476 void *headers_c;
3477 void *headers_v;
3478 u16 ethertype;
3479 u8 ip_proto;
3481 headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3482 headers_v = mlx5e_get_match_headers_value(actions, spec);
3483 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3485 /* for non-IP we only re-write MACs, so we're okay */
3486 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3487 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3488 return true;
3490 modify_ip_header = is_flow_action_modify_ip_header(flow_action);
3491 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3492 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3493 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3494 NL_SET_ERR_MSG_MOD(extack,
3495 "can't offload re-write of non TCP/UDP");
3496 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3497 ip_proto);
3498 return false;
3499 }
3501 return true;
3502 }
3505 static bool
3506 actions_match_supported_fdb(struct mlx5e_priv *priv,
3507 struct mlx5e_tc_flow *flow,
3508 struct netlink_ext_ack *extack)
3510 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3512 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3513 NL_SET_ERR_MSG_MOD(extack,
3514 "current firmware doesn't support split rule for port mirroring");
3515 netdev_warn_once(priv->netdev,
3516 "current firmware doesn't support split rule for port mirroring\n");
3524 actions_match_supported(struct mlx5e_priv *priv,
3525 struct flow_action *flow_action,
3527 struct mlx5e_tc_flow_parse_attr *parse_attr,
3528 struct mlx5e_tc_flow *flow,
3529 struct netlink_ext_ack *extack)
3531 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3532 !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions,
3533 extack))
3534 return false;
3536 if (mlx5e_is_eswitch_flow(flow) &&
3537 !actions_match_supported_fdb(priv, flow, extack))
3538 return false;
3540 return true;
3541 }
3543 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3545 return priv->mdev == peer_priv->mdev;
3548 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3550 struct mlx5_core_dev *fmdev, *pmdev;
3551 u64 fsystem_guid, psystem_guid;
3553 fmdev = priv->mdev;
3554 pmdev = peer_priv->mdev;
3556 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3557 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3559 return (fsystem_guid == psystem_guid);
3563 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3564 struct mlx5e_tc_flow *flow,
3565 struct mlx5_flow_attr *attr,
3566 struct netlink_ext_ack *extack)
3568 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3569 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3570 enum mlx5_flow_namespace_type ns_type;
3573 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3574 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3575 return 0;
3577 ns_type = mlx5e_get_flow_namespace(flow);
3579 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
3580 if (err)
3581 return err;
3583 if (parse_attr->mod_hdr_acts.num_actions > 0)
3584 return 0;
3586 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3587 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3588 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3590 if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3591 return 0;
3593 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3594 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3595 attr->esw_attr->split_count = 0;
3597 return 0;
3598 }
3600 static struct mlx5_flow_attr*
3601 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
3602 enum mlx5_flow_namespace_type ns_type)
3604 struct mlx5e_tc_flow_parse_attr *parse_attr;
3605 u32 attr_sz = ns_to_attr_sz(ns_type);
3606 struct mlx5_flow_attr *attr2;
3608 attr2 = mlx5_alloc_flow_attr(ns_type);
3609 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3610 if (!attr2 || !parse_attr) {
3611 kvfree(parse_attr);
3612 kfree(attr2);
3613 return NULL;
3614 }
3616 memcpy(attr2, attr, attr_sz);
3617 INIT_LIST_HEAD(&attr2->list);
3618 parse_attr->filter_dev = attr->parse_attr->filter_dev;
3620 attr2->counter = NULL;
3621 attr2->tc_act_cookies_count = 0;
3623 attr2->parse_attr = parse_attr;
3624 attr2->dest_chain = 0;
3625 attr2->dest_ft = NULL;
3626 attr2->act_id_restore_rule = NULL;
3627 memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));
3629 if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3630 attr2->esw_attr->out_count = 0;
3631 attr2->esw_attr->split_count = 0;
3634 attr2->branch_true = NULL;
3635 attr2->branch_false = NULL;
3636 attr2->jumping_attr = NULL;
3638 return attr2;
3639 }
3640 struct mlx5_flow_attr *
3641 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3643 struct mlx5_esw_flow_attr *esw_attr;
3644 struct mlx5_flow_attr *attr;
3645 int i;
3647 list_for_each_entry(attr, &flow->attrs, list) {
3648 esw_attr = attr->esw_attr;
3649 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
3650 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
3651 return attr;
3652 }
3653 }
3655 return NULL;
3656 }
3658 static void
3659 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3661 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3662 struct mlx5_flow_attr *attr;
3664 list_for_each_entry(attr, &flow->attrs, list) {
3665 if (list_is_last(&attr->list, &flow->attrs))
3666 break;
3668 mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
3669 }
3670 }
3672 static void
3673 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3675 struct mlx5_flow_attr *attr, *tmp;
3677 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3678 if (list_is_last(&attr->list, &flow->attrs))
3679 break;
3681 mlx5_free_flow_attr_actions(flow, attr);
3683 list_del(&attr->list);
3684 kvfree(attr->parse_attr);
3685 kfree(attr);
3686 }
3687 }
3689 static int
3690 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3692 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3693 struct mlx5_flow_attr *attr;
3694 int err = 0;
3696 list_for_each_entry(attr, &flow->attrs, list) {
3697 if (list_is_last(&attr->list, &flow->attrs))
3698 break;
3700 err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
3701 if (err)
3702 break;
3703 }
3705 return err;
3706 }
3708 /* TC filter rule HW translation:
3710 * +---------------------+
3711 * + ft prio (tc chain)  +
3712 * + original match      +
3713 * +---------------------+
3714 *           |
3715 *           | if multi table action
3716 *           |
3717 *           v
3718 * +---------------------+
3719 * + post act ft         |<----.
3720 * + match fte id        |     | split on multi table action
3721 * + do actions          |-----'
3722 * +---------------------+
3723 *           |
3724 *           |
3725 *           v
3726 * Do rest of the actions after last multi table action.
3727 */
3728 static int
3729 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
3731 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3732 struct mlx5_flow_attr *attr, *next_attr = NULL;
3733 struct mlx5e_post_act_handle *handle;
3734 int err = 0;
3736 /* This is going in reverse order as needed.
3737 * The first entry is the last attribute.
3739 list_for_each_entry(attr, &flow->attrs, list) {
3740 if (list_is_last(&attr->list, &flow->attrs)) {
3741 /* Set counter action on last post act rule. */
3742 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3743 }
3745 if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
3746 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3747 if (err)
3748 goto out_free;
3749 }
3751 /* Don't add post_act rule for first attr (last in the list).
3752 * It's being handled by the caller.
3754 if (list_is_last(&attr->list, &flow->attrs))
3755 break;
3757 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3758 if (err)
3759 goto out_free;
3761 err = post_process_attr(flow, attr, extack);
3762 if (err)
3763 goto out_free;
3765 handle = mlx5e_tc_post_act_add(post_act, attr);
3766 if (IS_ERR(handle)) {
3767 err = PTR_ERR(handle);
3768 goto out_free;
3769 }
3771 attr->post_act_handle = handle;
3773 if (attr->jumping_attr) {
3774 err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
3775 if (err)
3776 goto out_free;
3777 }
3779 next_attr = attr;
3780 }
3782 if (flow_flag_test(flow, SLOW))
3783 goto out;
3785 err = mlx5e_tc_offload_flow_post_acts(flow);
3786 if (err)
3787 goto out_free;
3789 out:
3790 return 0;
3792 out_free:
3793 free_flow_post_acts(flow);
3794 return err;
3795 }
3797 static int
3798 alloc_branch_attr(struct mlx5e_tc_flow *flow,
3799 struct mlx5e_tc_act_branch_ctrl *cond,
3800 struct mlx5_flow_attr **cond_attr,
3802 struct netlink_ext_ack *extack)
3804 struct mlx5_flow_attr *attr;
3805 int err = 0;
3807 *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
3808 mlx5e_get_flow_namespace(flow));
3809 if (!(*cond_attr))
3810 return -ENOMEM;
3812 attr = *cond_attr;
3814 switch (cond->act_id) {
3815 case FLOW_ACTION_DROP:
3816 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3817 break;
3818 case FLOW_ACTION_ACCEPT:
3819 case FLOW_ACTION_PIPE:
3820 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3821 attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
3822 break;
3823 case FLOW_ACTION_JUMP:
3824 if (*jump_count) {
3825 NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
3826 err = -EOPNOTSUPP;
3827 goto out_err;
3828 }
3829 *jump_count = cond->extval;
3830 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3831 attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
3832 break;
3833 default:
3834 err = -EOPNOTSUPP;
3835 goto out_err;
3836 }
3838 return 0;
3840 out_err:
3841 kfree(*cond_attr);
3842 *cond_attr = NULL;
3843 return err;
3844 }
3845 static void
3846 dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
3847 struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
3848 struct mlx5e_tc_jump_state *jump_state)
3850 if (!jump_state->jump_count)
3853 /* Single tc action can instantiate multiple offload actions (e.g. pedit)
3854 * Jump only over a tc action
3856 if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
3859 jump_state->last_id = act->id;
3860 jump_state->last_index = act->hw_index;
3862 /* nothing to do for intermediate actions */
3863 if (--jump_state->jump_count > 1)
3866 if (jump_state->jump_count == 1) { /* last action in the jump action list */
3868 /* create a new attribute after this action */
3869 jump_state->jump_target = true;
3871 if (tc_act->is_terminating_action) { /* the branch ends here */
3872 attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
3873 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3874 } else { /* the branch continues executing the rest of the actions */
3875 struct mlx5e_post_act *post_act;
3877 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3878 post_act = get_post_action(priv);
3879 attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
3881 } else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
3882 /* This is the post action for the jumping attribute (either red or green)
3883 * Use the stored jumping_attr to set the post act id on the jumping attribute
3885 attr->jumping_attr = jump_state->jumping_attr;
3890 parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
3891 struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
3892 struct mlx5e_tc_jump_state *jump_state,
3893 struct netlink_ext_ack *extack)
3895 struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
3896 u32 jump_count = jump_state->jump_count;
3899 if (!tc_act->get_branch_ctrl)
3900 return 0;
3902 tc_act->get_branch_ctrl(act, &cond_true, &cond_false);
3904 err = alloc_branch_attr(flow, &cond_true,
3905 &attr->branch_true, &jump_count, extack);
3906 if (err)
3907 return err;
3909 if (jump_count)
3910 jump_state->jumping_attr = attr->branch_true;
3912 err = alloc_branch_attr(flow, &cond_false,
3913 &attr->branch_false, &jump_count, extack);
3914 if (err)
3915 goto err_branch_false;
3917 if (jump_count && !jump_state->jumping_attr)
3918 jump_state->jumping_attr = attr->branch_false;
3920 jump_state->jump_count = jump_count;
3922 /* branching action requires its own counter */
3923 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3924 flow_flag_set(flow, USE_ACT_STATS);
3926 return 0;
3928 err_branch_false:
3929 free_branch_attr(flow, attr->branch_true);
3930 return err;
3931 }
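/* Branching recap: an action such as police exposes two verdicts through
 * get_branch_ctrl() (e.g. conform/exceed); each verdict gets its own
 * attribute (branch_true/branch_false), and a JUMP verdict is threaded
 * through jump_state so the attribute that follows the jump scope can be
 * wired back as the jumping attribute's post action.
 */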
3935 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
3936 struct flow_action *flow_action)
3938 struct netlink_ext_ack *extack = parse_state->extack;
3939 struct mlx5e_tc_flow *flow = parse_state->flow;
3940 struct mlx5e_tc_jump_state jump_state = {};
3941 struct mlx5_flow_attr *attr = flow->attr;
3942 enum mlx5_flow_namespace_type ns_type;
3943 struct mlx5e_priv *priv = flow->priv;
3944 struct mlx5_flow_attr *prev_attr;
3945 struct flow_action_entry *act;
3946 struct mlx5e_tc_act *tc_act;
3947 bool is_missable;
3948 int err, i;
3950 ns_type = mlx5e_get_flow_namespace(flow);
3951 list_add(&attr->list, &flow->attrs);
3953 flow_action_for_each(i, act, flow_action) {
3954 jump_state.jump_target = false;
3955 is_missable = false;
3958 tc_act = mlx5e_tc_act_get(act->id, ns_type);
3959 if (!tc_act) {
3960 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
3961 err = -EOPNOTSUPP;
3962 goto out_free_post_acts;
3963 }
3965 if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) {
3966 err = -EOPNOTSUPP;
3967 goto out_free_post_acts;
3968 }
3970 err = tc_act->parse_action(parse_state, act, priv, attr);
3971 if (err)
3972 goto out_free_post_acts;
3974 dec_jump_count(act, tc_act, attr, priv, &jump_state);
3976 err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
3977 if (err)
3978 goto out_free_post_acts;
3980 parse_state->actions |= attr->action;
3982 /* Split attr for multi table act if not the last act. */
3983 if (jump_state.jump_target ||
3984 (tc_act->is_multi_table_act &&
3985 tc_act->is_multi_table_act(priv, act, attr) &&
3986 i < flow_action->num_entries - 1)) {
3987 is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
3989 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3990 if (err)
3991 goto out_free_post_acts;
3992 prev_attr = attr;
3993 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
3994 if (!attr) {
3995 err = -ENOMEM;
3996 goto out_free_post_acts;
3997 }
3999 list_add(&attr->list, &flow->attrs);
4000 }
4002 if (is_missable) {
4003 /* Add counter to prev, and assign act to new (next) attr */
4004 prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4005 flow_flag_set(flow, USE_ACT_STATS);
4007 attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
4008 } else if (!tc_act->stats_action) {
4009 prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie;
4010 }
4011 }
4013 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
4014 if (err)
4015 goto out_free_post_acts;
4017 err = alloc_flow_post_acts(flow, extack);
4018 if (err)
4019 goto out_free_post_acts;
4021 return 0;
4023 out_free_post_acts:
4024 free_flow_post_acts(flow);
4026 return err;
4027 }
4029 static int
4030 flow_action_supported(struct flow_action *flow_action,
4031 struct netlink_ext_ack *extack)
4033 if (!flow_action_has_entries(flow_action)) {
4034 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
4038 if (!flow_action_hw_stats_check(flow_action, extack,
4039 FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
4040 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
4048 parse_tc_nic_actions(struct mlx5e_priv *priv,
4049 struct flow_action *flow_action,
4050 struct mlx5e_tc_flow *flow,
4051 struct netlink_ext_ack *extack)
4053 struct mlx5e_tc_act_parse_state *parse_state;
4054 struct mlx5e_tc_flow_parse_attr *parse_attr;
4055 struct mlx5_flow_attr *attr = flow->attr;
4056 int err;
4058 err = flow_action_supported(flow_action, extack);
4059 if (err)
4060 return err;
4062 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
4063 parse_attr = attr->parse_attr;
4064 parse_state = &parse_attr->parse_state;
4065 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4066 parse_state->ct_priv = get_ct_priv(priv);
4068 err = parse_tc_actions(parse_state, flow_action);
4069 if (err)
4070 return err;
4072 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4073 if (err)
4074 return err;
4076 err = verify_attr_actions(attr->action, extack);
4077 if (err)
4078 return err;
4080 if (!actions_match_supported(priv, flow_action, parse_state->actions,
4081 parse_attr, flow, extack))
4082 return -EOPNOTSUPP;
4084 return 0;
4085 }
4087 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
4088 struct net_device *peer_netdev)
4090 struct mlx5e_priv *peer_priv;
4092 peer_priv = netdev_priv(peer_netdev);
4094 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
4095 mlx5e_eswitch_vf_rep(priv->netdev) &&
4096 mlx5e_eswitch_vf_rep(peer_netdev) &&
4097 mlx5e_same_hw_devs(priv, peer_priv));
4100 static bool same_hw_reps(struct mlx5e_priv *priv,
4101 struct net_device *peer_netdev)
4103 struct mlx5e_priv *peer_priv;
4105 peer_priv = netdev_priv(peer_netdev);
4107 return mlx5e_eswitch_rep(priv->netdev) &&
4108 mlx5e_eswitch_rep(peer_netdev) &&
4109 mlx5e_same_hw_devs(priv, peer_priv);
4112 static bool is_lag_dev(struct mlx5e_priv *priv,
4113 struct net_device *peer_netdev)
4115 return ((mlx5_lag_is_sriov(priv->mdev) ||
4116 mlx5_lag_is_multipath(priv->mdev)) &&
4117 same_hw_reps(priv, peer_netdev));
4120 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
4122 return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
4125 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4126 struct net_device *out_dev)
4128 if (is_merged_eswitch_vfs(priv, out_dev))
4131 if (is_multiport_eligible(priv, out_dev))
4134 if (is_lag_dev(priv, out_dev))
4137 return mlx5e_eswitch_rep(out_dev) &&
4138 same_port_devs(priv, netdev_priv(out_dev));
4141 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
4142 struct mlx5_flow_attr *attr,
4143 int ifindex,
4144 enum mlx5e_tc_int_port_type type,
4145 u32 *action,
4146 int out_index)
4147 {
4148 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4149 struct mlx5e_tc_int_port_priv *int_port_priv;
4150 struct mlx5e_tc_flow_parse_attr *parse_attr;
4151 struct mlx5e_tc_int_port *dest_int_port;
4152 int err;
4154 parse_attr = attr->parse_attr;
4155 int_port_priv = mlx5e_get_int_port_priv(priv);
4157 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
4158 if (IS_ERR(dest_int_port))
4159 return PTR_ERR(dest_int_port);
4161 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
4162 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
4163 mlx5e_tc_int_port_get_metadata(dest_int_port));
4164 if (err) {
4165 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
4166 return err;
4167 }
4169 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4171 esw_attr->dest_int_port = dest_int_port;
4172 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
4173 esw_attr->split_count = out_index;
4175 /* Forward to root fdb for matching against the new source vport */
4176 attr->dest_chain = 0;
4178 return 0;
4179 }
4182 parse_tc_fdb_actions(struct mlx5e_priv *priv,
4183 struct flow_action *flow_action,
4184 struct mlx5e_tc_flow *flow,
4185 struct netlink_ext_ack *extack)
4187 struct mlx5e_tc_act_parse_state *parse_state;
4188 struct mlx5e_tc_flow_parse_attr *parse_attr;
4189 struct mlx5_flow_attr *attr = flow->attr;
4190 struct mlx5_esw_flow_attr *esw_attr;
4191 struct net_device *filter_dev;
4192 int err;
4194 err = flow_action_supported(flow_action, extack);
4195 if (err)
4196 return err;
4198 esw_attr = attr->esw_attr;
4199 parse_attr = attr->parse_attr;
4200 filter_dev = parse_attr->filter_dev;
4201 parse_state = &parse_attr->parse_state;
4202 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4203 parse_state->ct_priv = get_ct_priv(priv);
4205 err = parse_tc_actions(parse_state, flow_action);
4206 if (err)
4207 return err;
4209 /* Forward to/from internal port can only have 1 dest */
4210 if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
4211 esw_attr->out_count > 1) {
4212 NL_SET_ERR_MSG_MOD(extack,
4213 "Rules with internal port can have only one destination");
4217 /* Forward from tunnel/internal port to internal port is not supported */
4218 if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
4219 esw_attr->dest_int_port) {
4220 NL_SET_ERR_MSG_MOD(extack,
4221 "Forwarding from tunnel/internal port to internal port is not supported");
4225 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4226 if (err)
4227 return err;
4229 if (!actions_match_supported(priv, flow_action, parse_state->actions,
4230 parse_attr, flow, extack))
4231 return -EOPNOTSUPP;
4233 return 0;
4234 }
4236 static void get_flags(int flags, unsigned long *flow_flags)
4238 unsigned long __flow_flags = 0;
4240 if (flags & MLX5_TC_FLAG(INGRESS))
4241 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4242 if (flags & MLX5_TC_FLAG(EGRESS))
4243 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4245 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4246 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4247 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4248 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4249 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4250 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4252 *flow_flags = __flow_flags;
4255 static const struct rhashtable_params tc_ht_params = {
4256 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4257 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4258 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4259 .automatic_shrinking = true,
4260 };
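/* Offloaded flows are kept in an rhashtable keyed by the TC filter cookie,
 * so stats/destroy requests can locate a flow from the flow_cls_offload
 * alone; automatic_shrinking keeps the table sized to the live flow count.
 */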
4262 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4263 unsigned long flags)
4265 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
4266 struct mlx5e_rep_priv *rpriv;
4268 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4269 rpriv = priv->ppriv;
4270 return &rpriv->tc_ht;
4271 } else /* NIC offload */
4272 return &tc->ht;
4273 }
4275 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4277 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4278 struct mlx5_flow_attr *attr = flow->attr;
4279 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4280 flow_flag_test(flow, INGRESS);
4281 bool act_is_encap = !!(attr->action &
4282 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4283 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4284 MLX5_DEVCOM_ESW_OFFLOADS);
4286 if (!esw_paired)
4287 return false;
4289 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4290 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4291 (is_rep_ingress || act_is_encap))
4292 return true;
4294 if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
4295 return true;
4297 return false;
4298 }
4300 struct mlx5_flow_attr *
4301 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4303 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4304 sizeof(struct mlx5_esw_flow_attr) :
4305 sizeof(struct mlx5_nic_flow_attr);
4306 struct mlx5_flow_attr *attr;
4308 attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4309 if (!attr)
4310 return attr;
4312 INIT_LIST_HEAD(&attr->list);
4313 return attr;
4314 }
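/* Allocation detail: the namespace-specific part (esw or nic flow attr) is
 * carved out of the same kzalloc'd block, directly after struct
 * mlx5_flow_attr - the esw_attr/nic_attr accessors are expected to point at
 * that trailing space, which is why the two sizes are summed above.
 */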
4317 mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
4319 struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
4324 if (attr->post_act_handle)
4325 mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
4327 clean_encap_dests(flow->priv, flow, attr);
4329 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
4330 mlx5_fc_destroy(counter_dev, attr->counter);
4332 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
4333 mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
4334 mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
4337 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
4339 free_branch_attr(flow, attr->branch_true);
4340 free_branch_attr(flow, attr->branch_false);
4341 }
4343 static int
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4345 struct flow_cls_offload *f, unsigned long flow_flags,
4346 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
4349 struct mlx5e_tc_flow_parse_attr *parse_attr;
4350 struct mlx5_flow_attr *attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;
4355 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4356 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}
4360 flow->flags = flow_flags;
	flow->cookie = f->cookie;
	flow->priv = priv;

	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
	if (!attr) {
		err = -ENOMEM;
		goto err_free;
	}
	flow->attr = attr;
4370 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4371 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4372 INIT_LIST_HEAD(&flow->hairpin);
4373 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4374 INIT_LIST_HEAD(&flow->attrs);
4375 refcount_set(&flow->refcnt, 1);
4376 init_completion(&flow->init_done);
4377 init_completion(&flow->del_hw_done);
	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static void
mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4392 struct mlx5e_tc_flow_parse_attr *parse_attr,
4393 struct flow_cls_offload *f)
4395 attr->parse_attr = parse_attr;
4396 attr->chain = f->common.chain_index;
	attr->prio = f->common.prio;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4402 struct mlx5e_priv *priv,
4403 struct mlx5e_tc_flow_parse_attr *parse_attr,
4404 struct flow_cls_offload *f,
4405 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
4408 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4409 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4411 mlx5e_flow_attr_init(attr, parse_attr, f);
4413 esw_attr->in_rep = in_rep;
4414 esw_attr->in_mdev = in_mdev;
	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
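/* Core FDB offload path: allocate the flow, parse the flower match and the
 * ct matches, parse the actions and install the rule in hardware. Under
 * multipath, -ENETUNREACH from the install step parks the flow on the
 * unready list instead of failing, so it can be re-offloaded once a route
 * becomes available again (see mlx5e_tc_reoffload_flows_work()).
 */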
4423 static struct mlx5e_tc_flow *
4424 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4425 struct flow_cls_offload *f,
4426 unsigned long flow_flags,
4427 struct net_device *filter_dev,
4428 struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
4431 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4432 struct netlink_ext_ack *extack = f->common.extack;
4433 struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;
4437 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4438 attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
4481 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4482 struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
4485 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4486 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4487 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4488 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4489 struct mlx5e_tc_flow_parse_attr *parse_attr;
4490 struct mlx5e_rep_priv *peer_urpriv;
4491 struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;
4499 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4500 peer_priv = netdev_priv(peer_urpriv->netdev);
	/* in_mdev holds the mdev the packet originated from.
	 * Packets redirected to the uplink use the same mdev as the
	 * original flow, while packets redirected from the uplink use the
	 * peer mdev.
	 * Multiport eswitch is the special case in which we need to
	 * keep the original mdev.
	 */
4509 if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;
4514 parse_attr = flow->attr->parse_attr;
4515 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4516 parse_attr->filter_dev,
4517 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
4524 flow_flag_set(flow, DUP);
4525 mutex_lock(&esw->offloads.peer_mutex);
4526 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4527 mutex_unlock(&esw->offloads.peer_mutex);
out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
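/* Install the flow on the local eswitch and, when is_peer_flow_needed(),
 * duplicate it on the paired eswitch; if the peer copy cannot be installed
 * the local flow is removed as well.
 */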
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4536 struct flow_cls_offload *f,
4537 unsigned long flow_flags,
4538 struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
4541 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4542 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4543 struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			return err;
		}
	}

	*__flow = flow;
	return 0;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4570 struct flow_cls_offload *f,
4571 unsigned long flow_flags,
4572 struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
4575 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4576 struct netlink_ext_ack *extack = f->common.extack;
4577 struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;

	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4631 struct flow_cls_offload *f,
4632 unsigned long flags,
4633 struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
4636 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule is allowed to duplicate on a non-uplink
	 * representor sharing a tc block with other slaves of a lag device.
	 * rpriv can be NULL if this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
4665 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
4668 struct netlink_ext_ack *extack = f->common.extack;
4669 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4670 struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * so we can support two copies of flows with different
		 * original net devices.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (err)
		goto out;
4701 trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;
	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
4709 if (is_flow_rule_duplicate_allowed(dev, rpriv))
4710 flow->orig_dev = dev;
	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}
4736 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}
4771 int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
			       struct flow_offload_action *fl_act)
{
	return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
}
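/* Report hardware counters back to TC. If the flow was duplicated on a
 * paired eswitch, the peer flow's counter is queried too and the byte and
 * packet counts are summed before the update.
 */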
4777 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
4780 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4781 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4782 struct mlx5_eswitch *peer_esw;
4783 struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, USE_ACT_STATS)) {
			f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow);
			if (!counter)
				goto errout;

			mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
		}
	}
	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		if (flow_flag_test(flow, USE_ACT_STATS)) {
			f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow->peer_flow);
			if (!counter)
				goto no_peer_counter;
			mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

			bytes += bytes2;
			packets += packets2;
			lastuse = max_t(u64, lastuse, lastuse2);
		}
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
4844 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
4845 FLOW_ACTION_HW_STATS_DELAYED);
4846 trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
4852 static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}
4868 esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/sec.
	 * mbit means million bits.
	 * Moreover, if rate is non-zero we choose to configure to a minimum of
	 * 1 mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}
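	/* For example, rate = 100000 bytes/sec is 800000 bits/sec and rounds
	 * to rate_mbps = 1: (800000 + 500000) / 1000000 = 1. With
	 * rate = 1000000 bytes/sec the result is exactly 8 mbit/sec.
	 */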
	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
static int
tc_matchall_police_validate(const struct flow_action *action,
4890 const struct flow_action_entry *act,
			    struct netlink_ext_ack *extack)
{
	if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not continue");
		return -EOPNOTSUPP;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
4922 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4923 struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EOPNOTSUPP;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = tc_matchall_police_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
4968 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
4971 struct netlink_ext_ack *extack = ma->common.extack;
	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
4981 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
4984 struct netlink_ext_ack *extack = ma->common.extack;
	return apply_police_params(priv, 0, extack);
}
4989 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
4992 struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
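/* Two-phase scan of the hairpin table: take a reference on every entry under
 * hairpin_tbl_lock, then, with the lock dropped, wait for each entry to
 * finish initializing and clear the dead peer from pairs that belong to the
 * departing device.
 */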
5005 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
						  struct mlx5e_priv *peer_priv)
{
5008 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
5009 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
5010 struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
5020 mutex_lock(&tc->hairpin_tbl_lock);
5021 hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
5022 if (refcount_inc_not_zero(&hpe->refcnt))
5023 list_add(&hpe->dead_peer_wait_list, &init_wait_list);
5024 mutex_unlock(&tc->hairpin_tbl_lock);
5026 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
5027 wait_for_completion(&hpe->res_ready);
5028 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
5029 mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
		mlx5e_hairpin_put(priv, hpe);
	}
}
5035 static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
5038 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5039 struct mlx5e_priv *peer_priv;
5040 struct mlx5e_tc_table *tc;
5041 struct mlx5e_priv *priv;
	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
5062 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
5063 struct mlx5_flow_table **ft = &tc->miss_t;
5064 struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err;

	ft_attr.max_fte = 1;
5069 ft_attr.autogroup.max_num_groups = 1;
5070 ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
		return err;
	}

	return 0;
}
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
5085 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
5092 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
5093 struct mlx5_core_dev *dev = priv->mdev;
5094 struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;
5099 mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
5100 mutex_init(&tc->t_lock);
5101 mutex_init(&tc->hairpin_tbl_lock);
5102 hash_init(tc->hairpin_tbl);
	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;
5109 lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
5110 lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
5112 mapping_id = mlx5_query_nic_system_image_guid(dev);
5114 chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
5115 sizeof(struct mlx5_mapped_obj),
5116 MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;
5128 if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
5129 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
5130 MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
5131 attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
5132 attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
5133 attr.default_ft = tc->miss_t;
5134 attr.mapping = chains_mapping;
5135 attr.fs_base_prio = MLX5E_TC_PRIO;
5137 tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}
5143 mlx5_chains_print_info(tc->chains);
5145 tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
5146 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
5147 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
5149 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}
5159 mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
5161 tc->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(tc->action_stats_handle)) {
		err = PTR_ERR(tc->action_stats_handle);
		goto err_act_stats;
	}

	return 0;

err_act_stats:
	unregister_netdevice_notifier_dev_net(priv->netdev,
					      &tc->netdevice_nb,
					      &tc->netdevice_nn);
err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
5188 struct mlx5e_tc_flow *flow = ptr;
5189 struct mlx5e_priv *priv = flow->priv;
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
5197 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
5199 debugfs_remove_recursive(tc->dfs_root);
	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
5207 mutex_destroy(&tc->hairpin_tbl_lock);
5209 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
5217 mlx5_tc_ct_clean(tc->ct);
5218 mlx5e_tc_post_act_destroy(tc->post_act);
5219 mapping_destroy(tc->mapping);
5220 mlx5_chains_destroy(tc->chains);
5221 mlx5e_tc_nic_destroy_miss_table(priv);
	mlx5e_tc_act_stats_free(tc->action_stats_handle);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	return 0;
}
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
5246 const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
5247 struct mlx5e_rep_priv *rpriv;
5248 struct mapping_ctx *mapping;
5249 struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;
5254 rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
5255 priv = netdev_priv(rpriv->netdev);
5256 esw = priv->mdev->priv.eswitch;
5258 uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
5259 MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);
5266 uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));
5268 uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
5270 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
5272 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
5273 sizeof(struct tunnel_match_key),
5274 TUNNEL_INFO_BITS_MASK, true);
5276 if (IS_ERR(mapping)) {
5277 err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;
	/* The last two values are reserved for stack devices slow path table
	 * mark and bridge ingress push mark.
	 */
5285 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
5286 sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
5287 if (IS_ERR(mapping)) {
5288 err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;
5293 uplink_priv->encap = mlx5e_tc_tun_init(priv);
5294 if (IS_ERR(uplink_priv->encap)) {
5295 err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}
5299 uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
5300 if (IS_ERR(uplink_priv->action_stats_handle)) {
5301 err = PTR_ERR(uplink_priv->action_stats_handle);
		goto err_action_counter;
	}

	mlx5_esw_offloads_devcom_init(esw);

	return 0;

err_action_counter:
5310 mlx5e_tc_tun_cleanup(uplink_priv->encap);
5311 err_register_fib_notifier:
5312 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5313 err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
5316 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
5317 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
5318 mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
5327 struct mlx5e_rep_priv *rpriv;
5328 struct mlx5_eswitch *esw;
5329 struct mlx5e_priv *priv;
5331 rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
5332 priv = netdev_priv(rpriv->netdev);
5333 esw = priv->mdev->priv.eswitch;
5335 mlx5_esw_offloads_devcom_cleanup(esw);
5337 mlx5e_tc_tun_cleanup(uplink_priv->encap);
5339 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5340 mapping_destroy(uplink_priv->tunnel_mapping);
5342 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
5343 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
5344 mlx5_tc_ct_clean(uplink_priv->ct_priv);
5345 mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
5346 mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
5352 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
5359 struct mlx5e_tc_flow *flow, *tmp;
5361 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
5367 struct mlx5_rep_uplink_priv *rpriv =
5368 container_of(work, struct mlx5_rep_uplink_priv,
5369 reoffload_flows_work);
5370 struct mlx5e_tc_flow *flow, *tmp;
5372 mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
5380 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
5381 struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
5402 unsigned long flags = MLX5_TC_FLAG(INGRESS);
5403 struct mlx5e_priv *priv = cb_priv;
	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				    struct mlx5e_tc_update_priv *tc_priv,
				    u32 tunnel_id)
{
5425 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5426 struct tunnel_match_enc_opts enc_opts = {};
5427 struct mlx5_rep_uplink_priv *uplink_priv;
5428 struct mlx5e_rep_priv *uplink_rpriv;
5429 struct metadata_dst *tun_dst;
5430 struct tunnel_match_key key;
5431 u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;
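	/* tunnel_id packs two mapping ids: the low ENC_OPTS_BITS hold the
	 * enc-opts mapping id, the bits above hold the tunnel match-key
	 * mapping id. A zero tunnel id means there is nothing to restore.
	 */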
5435 enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	if (!tun_id)
		return true;
5441 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
5442 uplink_priv = &uplink_rpriv->uplink_priv;
	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}
5463 switch (key.enc_control.addr_type) {
5464 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
					   key.enc_ip.tos, key.enc_ip.ttl,
					   key.enc_tp.dst, TUNNEL_KEY,
					   key32_to_tunnel_id(key.enc_key_id.keyid),
					   enc_opts.key.len);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
					     key.enc_ip.tos, key.enc_ip.ttl,
					     key.enc_tp.dst, 0, TUNNEL_KEY,
					     key32_to_tunnel_id(key.enc_key_id.keyid),
					     enc_opts.key.len);
		break;
	default:
		netdev_dbg(priv->netdev,
			   "Couldn't restore tunnel, unsupported addr_type: %d\n",
			   key.enc_control.addr_type);
		return false;
	}

	if (!tun_dst) {
		netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
		return false;
	}
5490 tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
	if (enc_opts.key.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					enc_opts.key.data,
					enc_opts.key.len,
					enc_opts.key.dst_opt_type);
5498 skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set fwd_dev so we do dev_put() after datapath */
	tc_priv->fwd_dev = dev;

	skb->dev = dev;

	return true;
}
5515 static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
5516 struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
					  u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
{
5519 struct mlx5e_priv *priv = netdev_priv(skb->dev);
5520 struct tc_skb_ext *tc_skb_ext;
	u64 act_miss_cookie;
	u32 chain;
5524 chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
5525 act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
5526 mapped_obj->act_miss_cookie : 0;
	if (chain || act_miss_cookie) {
		if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
			return false;

		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (!tc_skb_ext) {
			WARN_ON(1);
			return false;
		}

		if (act_miss_cookie) {
			tc_skb_ext->act_miss_cookie = act_miss_cookie;
			tc_skb_ext->act_miss = 1;
		} else {
			tc_skb_ext->chain = chain;
		}
	}

	return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}
5551 static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
5552 struct mlx5_mapped_obj *mapped_obj,
					struct mlx5e_tc_update_priv *tc_priv)
{
	if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
		netdev_dbg(priv->netdev,
			   "Failed to restore tunnel info for sampled packet\n");
		return;
	}
	mlx5e_tc_sample_skb(skb, mapped_obj);
}
5563 static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
5564 struct mlx5_mapped_obj *mapped_obj,
					  struct mlx5e_tc_update_priv *tc_priv,
					  u32 tunnel_id)
{
5568 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5569 struct mlx5_rep_uplink_priv *uplink_priv;
5570 struct mlx5e_rep_priv *uplink_rpriv;
5571 bool forward_tx = false;
	/* Tunnel restore takes precedence over int port restore */
	if (tunnel_id)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
5577 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
5578 uplink_priv = &uplink_rpriv->uplink_priv;
	if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
				      mapped_obj->int_port_metadata, &forward_tx)) {
		/* Set fwd_dev for future dev_put */
		tc_priv->fwd_dev = skb->dev;
		tc_priv->forward_tx = forward_tx;

		return true;
	}

	return false;
}
5592 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
5593 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
5594 struct mlx5_tc_ct_priv *ct_priv,
5595 u32 zone_restore_id, u32 tunnel_id,
			 struct mlx5e_tc_update_priv *tc_priv)
{
5598 struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	int err;
	err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
	if (err) {
		netdev_dbg(skb->dev,
			   "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
			   mapped_obj_id, err);
		return false;
	}
5610 switch (mapped_obj.type) {
5611 case MLX5_MAPPED_OBJ_CHAIN:
5612 case MLX5_MAPPED_OBJ_ACT_MISS:
5613 return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
5614 tunnel_id, tc_priv);
5615 case MLX5_MAPPED_OBJ_SAMPLE:
5616 mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
		tc_priv->skb_done = true;
		return true;
5619 case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
5620 return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
	default:
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
}
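/* NIC RX restore path: the low bits of the CQE ft_metadata (reg_b) carry the
 * chain mapping tag (MLX5E_TC_TABLE_CHAIN_TAG_MASK), the bits above it the
 * CT zone restore id.
 */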
bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{
5631 struct mlx5e_priv *priv = netdev_priv(skb->dev);
5632 u32 mapped_obj_id, reg_b, zone_restore_id;
5633 struct mlx5_tc_ct_priv *ct_priv;
5634 struct mapping_ctx *mapping_ctx;
5635 struct mlx5e_tc_table *tc;
5637 reg_b = be32_to_cpu(cqe->ft_metadata);
5638 tc = mlx5e_fs_get_tc(priv->fs);
	mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
	zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			  ESW_ZONE_ID_MASK;
	ct_priv = tc->ct;
	mapping_ctx = tc->mapping;

	return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv,
				   zone_restore_id, 0, NULL);
}
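/* Mapped object ids come from the eswitch-wide reg_c0 object pool in
 * switchdev mode, and from this device's private chain mapping in NIC mode.
 */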
5649 static struct mapping_ctx *
mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
{
5652 struct mlx5e_tc_table *tc;
5653 struct mlx5_eswitch *esw;
5654 struct mapping_ctx *ctx;
	if (is_mdev_switchdev_mode(priv->mdev)) {
		esw = priv->mdev->priv.eswitch;
		ctx = esw->offloads.reg_c0_obj_pool;
	} else {
		tc = mlx5e_fs_get_tc(priv->fs);
		ctx = tc->mapping;
	}

	return ctx;
}
5667 int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				     u64 act_miss_cookie, u32 *act_miss_mapping)
{
5670 struct mlx5_mapped_obj mapped_obj = {};
5671 struct mlx5_eswitch *esw;
5672 struct mapping_ctx *ctx;
5675 ctx = mlx5e_get_priv_obj_mapping(priv);
5676 mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
5677 mapped_obj.act_miss_cookie = act_miss_cookie;
	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
	if (err)
		return err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return 0;
5685 esw = priv->mdev->priv.eswitch;
	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
	if (IS_ERR(attr->act_id_restore_rule)) {
		err = PTR_ERR(attr->act_id_restore_rule);
		goto err_rule;
	}

	return 0;

err_rule:
	mapping_remove(ctx, *act_miss_mapping);
	return err;
}
5697 void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				       u32 act_miss_mapping)
{
5700 struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
5702 if (is_mdev_switchdev_mode(priv->mdev))
5703 mlx5_del_flow_rules(attr->act_id_restore_rule);
	mapping_remove(ctx, act_miss_mapping);
}