/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "lib/devcom.h"
#include "lib/fs_chains.h"
#include "en/mapping.h"
#include "en/tc/post_meter.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
        xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
        xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
        mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
                                       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
        .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
        .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
        return xa_load(&esw->offloads.vport_reps, vport_num);

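/* Presumably a steering optimization: tell the HW where packets hitting
 * this rule originate (uplink, local vport or an internal port) by
 * filling spec->flow_context.flow_source.
 */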
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_esw_flow_attr *attr)
        if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)

        spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

        spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
                                         MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
                                         MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

/* Only the upper 16 bits of reg c0 actually need to be cleared, but the
 * lower 16 bits are not used later in the pipeline either, so clear them
 * all for simplicity.
 */
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

                if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
                        spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;

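/* Match on the rule's source port, either via the vport metadata in
 * reg_c_0 (when metadata matching is enabled) or via the source_port
 * misc parameter plus, on merged-eswitch devices, the source eswitch
 * owner vhca_id.
 */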
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_flow_attr *attr,
                                  struct mlx5_eswitch *src_esw,
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        /* Use metadata matching because the vport is not represented by a
         * single VHCA in dual-port RoCE mode, and matching on the source
         * vport may fail.
         */
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                if (mlx5_esw_indir_table_decap_vport(attr))
                        vport = mlx5_esw_indir_table_decap_vport(attr);

                if (!attr->chain && esw_attr && esw_attr->int_port)
                        mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
                        mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET(fte_match_set_misc, misc,
                         source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(src_esw->dev, vhca_id));

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

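/* The esw_setup_*() / esw_cleanup_*() helpers below translate flow
 * attributes into concrete flow destinations (flow table, vport,
 * sampler, packet-length range, ...) when building an offloaded rule.
 */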
esw_setup_decap_indir(struct mlx5_eswitch *esw,
                      struct mlx5_flow_attr *attr)
        struct mlx5_flow_table *ft;

        if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))

        ft = mlx5_esw_indir_table_get(esw, attr,
                                      mlx5_esw_indir_table_decap_vport(attr), true);
        return PTR_ERR_OR_ZERO(ft);

esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
                        struct mlx5_flow_attr *attr)
        if (mlx5_esw_indir_table_decap_vport(attr))
                mlx5_esw_indir_table_put(esw,
                                         mlx5_esw_indir_table_decap_vport(attr),

esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
                   struct mlx5e_meter_attr *meter,
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
        dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
        dest[i].range.min = 0;
        dest[i].range.max = meter->params.mtu;
        dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
        dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);

esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
                       struct mlx5_flow_act *flow_act,
        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
        dest[i].sampler_id = sampler_id;

esw_setup_ft_dest(struct mlx5_flow_destination *dest,
                  struct mlx5_flow_act *flow_act,
                  struct mlx5_eswitch *esw,
                  struct mlx5_flow_attr *attr,
        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = attr->dest_ft;

        if (mlx5_esw_indir_table_decap_vport(attr))
                return esw_setup_decap_indir(esw, attr);

esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                      struct mlx5_fs_chains *chains, int i)
        if (mlx5_chains_ignore_flow_level_supported(chains))
                flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_chains_get_tc_end_ft(chains);

esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                         struct mlx5_eswitch *esw, int i)
        if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
                flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);

esw_setup_chain_dest(struct mlx5_flow_destination *dest,
                     struct mlx5_flow_act *flow_act,
                     struct mlx5_fs_chains *chains,
                     u32 chain, u32 prio, u32 level,
        struct mlx5_flow_table *ft;

        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        ft = mlx5_chains_get_table(chains, chain, prio, level);

        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);

        for (i = from; i < to; i++)
                if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
                        mlx5_chains_put_table(chains, 0, 1, 0);
                else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
                                                     esw_attr->dests[i].mdev))
                        mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,

esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
        for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
                if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)

esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
                                 struct mlx5_flow_act *flow_act,
                                 struct mlx5_eswitch *esw,
                                 struct mlx5_fs_chains *chains,
                                 struct mlx5_flow_attr *attr,
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))

        /* Flow steering cannot handle more than one destination with the
         * same flow table in a single flow.
         */
        if (esw_attr->out_count - esw_attr->split_count > 1)

        err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);

        if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
                                               struct mlx5_flow_attr *attr)
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);

esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        /* The indirect table is supported only for flows whose in_port is
         * the uplink and whose destination is a vport on the same eswitch
         * as the uplink; return false if at least one destination does not
         * meet these criteria.
         */
        for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
                if (esw_attr->dests[i].rep &&
                    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
                                                esw_attr->dests[i].mdev)) {

esw_setup_indir_table(struct mlx5_flow_destination *dest,
                      struct mlx5_flow_act *flow_act,
                      struct mlx5_eswitch *esw,
                      struct mlx5_flow_attr *attr,
                      bool ignore_flow_lvl,
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))

        for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
                flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
                dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

                dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
                                                       esw_attr->dests[j].rep->vport, false);
                if (IS_ERR(dest[*i].ft)) {
                        err = PTR_ERR(dest[*i].ft);
                        goto err_indir_tbl_get;

        if (mlx5_esw_indir_table_decap_vport(attr)) {
                err = esw_setup_decap_indir(esw, attr);
                        goto err_indir_tbl_get;

        esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
        esw_cleanup_decap_indir(esw, attr);

esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
        mlx5_chains_put_table(chains, chain, prio, level);

esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
                     int attr_idx, int dest_idx, bool pkt_reformat)
        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
                dest[dest_idx].vport.vhca_id =
                        MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
                if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
                    mlx5_lag_is_mpesw(esw->dev))
                        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
        if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;

                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
                dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;

esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
        for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
                esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);

esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
        return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
               mlx5_eswitch_vport_match_metadata_enabled(esw) &&
               MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);

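/* Fill the destination array for a rule from its attribute flags; the
 * branches below are checked in priority order (slow path, sampler,
 * accept, MTU meter, indirect table, chain src port rewrite, vport or
 * flow table dests) and *i is advanced past each filled entry.
 */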
esw_setup_dests(struct mlx5_flow_destination *dest,
                struct mlx5_flow_act *flow_act,
                struct mlx5_eswitch *esw,
                struct mlx5_flow_attr *attr,
                struct mlx5_flow_spec *spec,
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);

        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
            esw_src_port_rewrite_supported(esw))
                attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

        if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
                esw_setup_slow_path_dest(dest, flow_act, esw, *i);

        if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
                esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
        } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
                esw_setup_accept_dest(dest, flow_act, chains, *i);
        } else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
                err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
        } else if (esw_is_indir_table(esw, attr)) {
                err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
        } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
                err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
                *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

                err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
        } else if (attr->dest_chain) {
                err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,

esw_cleanup_dests(struct mlx5_eswitch *esw,
                  struct mlx5_flow_attr *attr)
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);

                esw_cleanup_decap_indir(esw, attr);
        } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
                if (attr->dest_chain)
                        esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
                else if (esw_is_indir_table(esw, attr))
                        esw_cleanup_indir_table(esw, attr);
                else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
                        esw_cleanup_chain_src_port_rewrite(esw, attr);

esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
        struct mlx5e_flow_meter_handle *meter;

        meter = attr->meter_attr.meter;
        flow_act->exe_aso.type = attr->exe_aso_type;
        flow_act->exe_aso.object_id = meter->obj_id;
        flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
        flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
        /* use metadata reg 5 for packet color */
        flow_act->exe_aso.return_reg_id = 5;

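/* Create an offloaded FDB rule: build the destinations, pick the FDB
 * (per-vport table for split rules, otherwise the tc chains table),
 * optionally match on the source port, and add the rule, going through
 * a termination table when one is required.
 */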
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_flow_attr *attr)
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        bool split = !!(esw_attr->split_count);
        struct mlx5_vport_tbl_attr fwd_attr;
        struct mlx5_flow_destination *dest;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *fdb;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return ERR_PTR(-EOPNOTSUPP);

        dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        flow_act.action = attr->action;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
                flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
                flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
                flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
                if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
                        flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
                        flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
                        flow_act.vlan[1].prio = esw_attr->vlan_prio[1];

        mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
                        goto err_create_goto_table;

        if (esw_attr->decap_pkt_reformat)
                flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter_id = mlx5_fc_id(attr->counter);

        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
        if (attr->inner_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_hdr = attr->modify_hdr;

        if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
            attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
                esw_setup_meter(attr, &flow_act);

        fwd_attr.chain = attr->chain;
        fwd_attr.prio = attr->prio;
        fwd_attr.vport = esw_attr->in_rep->vport;
        fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

        fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);

        if (attr->chain || attr->prio)
                fdb = mlx5_chains_get_table(chains, attr->chain,

        if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
                mlx5_eswitch_set_rule_source_port(esw, spec, attr,
                                                  esw_attr->in_mdev->priv.eswitch,
                                                  esw_attr->in_rep->vport);

        rule = ERR_CAST(fdb);

        if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
                rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
                rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);

        atomic64_inc(&esw->offloads.num_flows);

        mlx5_esw_vporttbl_put(esw, &fwd_attr);
        else if (attr->chain || attr->prio)
                mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);

        esw_cleanup_dests(esw, attr);
err_create_goto_table:

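/* Add the fast-path half of a split (mirror) rule: the pre-split vport
 * destinations are applied in the fast FDB and the packet is then
 * forwarded to the per-vport table where the remaining actions run.
 */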
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_spec *spec,
                          struct mlx5_flow_attr *attr)
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        struct mlx5_vport_tbl_attr fwd_attr;
        struct mlx5_flow_destination *dest;
        struct mlx5_flow_table *fast_fdb;
        struct mlx5_flow_table *fwd_fdb;
        struct mlx5_flow_handle *rule;

        dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
        if (IS_ERR(fast_fdb)) {
                rule = ERR_CAST(fast_fdb);

        fwd_attr.chain = attr->chain;
        fwd_attr.prio = attr->prio;
        fwd_attr.vport = esw_attr->in_rep->vport;
        fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
        fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
        if (IS_ERR(fwd_fdb)) {
                rule = ERR_CAST(fwd_fdb);

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        for (i = 0; i < esw_attr->split_count; i++) {
                if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
                        /* Source port rewrite (forwarding to an OVS internal
                         * port or a stack device) isn't supported in rules
                         * with a split action.
                         */

                        esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

                        goto err_chain_src_rewrite;

        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = fwd_fdb;

        mlx5_eswitch_set_rule_source_port(esw, spec, attr,
                                          esw_attr->in_mdev->priv.eswitch,
                                          esw_attr->in_rep->vport);

        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

        i = esw_attr->split_count;
        goto err_chain_src_rewrite;

        atomic64_inc(&esw->offloads.num_flows);

err_chain_src_rewrite:
        mlx5_esw_vporttbl_put(esw, &fwd_attr);
        mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);

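/* Tear down a rule created by one of the add helpers above, releasing
 * the termination tables, per-vport table and chain references taken
 * at creation time.
 */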
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
                        struct mlx5_flow_handle *rule,
                        struct mlx5_flow_attr *attr,
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        bool split = (esw_attr->split_count > 0);
        struct mlx5_vport_tbl_attr fwd_attr;

        mlx5_del_flow_rules(rule);

        if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
                /* unref the term table */
                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
                        if (esw_attr->dests[i].termtbl)
                                mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);

        atomic64_dec(&esw->offloads.num_flows);

        if (fwd_rule || split) {
                fwd_attr.chain = attr->chain;
                fwd_attr.prio = attr->prio;
                fwd_attr.vport = esw_attr->in_rep->vport;
                fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

        mlx5_esw_vporttbl_put(esw, &fwd_attr);
        mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);

        mlx5_esw_vporttbl_put(esw, &fwd_attr);
        else if (attr->chain || attr->prio)
                mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
        esw_cleanup_dests(esw, attr);

mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_flow_attr *attr)
        __mlx5_eswitch_del_rule(esw, rule, attr, false);

mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_flow_attr *attr)
        __mlx5_eswitch_del_rule(esw, rule, attr, true);

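/* Send-to-vport rules live in the slow-path FDB and steer packets that
 * a representor sent (matched by source SQN plus the source port or its
 * metadata) to the vport the representor represents.
 */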
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
                                    struct mlx5_eswitch *from_esw,
                                    struct mlx5_eswitch_rep *rep,
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
                flow_rule = ERR_PTR(-ENOMEM);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

        /* source vport is the esw manager */
        vport = from_esw->manager_vport;

        if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);

                if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
                        MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
                                 MLX5_CAP_GEN(from_esw->dev, vhca_id));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

                if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
                        MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                         source_eswitch_owner_vhca_id);

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = rep->vport;
        dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
        dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
            rep->vport == MLX5_VPORT_UPLINK)
                spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",

EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
        mlx5_del_flow_rules(rule);

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
        mlx5_del_flow_rules(rule);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
        MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
                 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
        dest.vport.num = vport_num;

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
                         vport_num, PTR_ERR(flow_rule));

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
        return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
               MLX5_FDB_TO_VPORT_REG_C_1;

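/* Enable or disable copying reg_c_0 (and reg_c_1 when loopback is
 * supported) from the FDB to the vport context, presumably so the
 * metadata remains visible after the packet leaves the eswitch domain.
 */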
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
        u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
        u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
        u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};

        if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
            !mlx5_eswitch_vport_match_metadata_enabled(esw))

        MLX5_SET(query_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
        err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);

        curr = MLX5_GET(query_esw_vport_context_out, out,
                        esw_vport_context.fdb_to_vport_reg_c_id);
        wanted = MLX5_FDB_TO_VPORT_REG_C_0;
        if (mlx5_eswitch_reg_c1_loopback_supported(esw))
                wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

        MLX5_SET(modify_esw_vport_context_in, min,
                 esw_vport_context.fdb_to_vport_reg_c_id, curr);
        MLX5_SET(modify_esw_vport_context_in, min,
                 field_select.fdb_to_vport_reg_c_id, 1);

        err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);

        if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
                esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
                esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
                                  struct mlx5_core_dev *peer_dev,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_flow_destination *dest)
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(peer_dev, vhca_id));

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

        dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest->vport.num = peer_dev->priv.eswitch->manager_vport;
        dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
        dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
                                               struct mlx5_eswitch *peer_esw,
                                               struct mlx5_flow_spec *spec,
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(peer_esw,

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);

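/* On merged-eswitch setups, add one miss rule per peer vport that
 * forwards traffic sourced from that peer vport to the peer device's
 * eswitch manager vport.
 */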
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
                                       struct mlx5_core_dev *peer_dev)
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle **flows;
        /* total vports is the same for both e-switches */
        int nvports = esw->total_vports;
        struct mlx5_flow_handle *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_vport *vport;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);

        peer_miss_rules_setup(esw, peer_dev, spec, &dest);

        flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
                goto alloc_flows_err;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,

        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
                esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
                                                   spec, MLX5_VPORT_PF);

                flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                           spec, &flow_act, &dest, 1);
                        err = PTR_ERR(flow);
                        goto add_pf_flow_err;
                flows[vport->index] = flow;

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
                flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                           spec, &flow_act, &dest, 1);
                        err = PTR_ERR(flow);
                        goto add_ecpf_flow_err;
                flows[vport->index] = flow;

        mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
                esw_set_peer_miss_rule_source_port(esw,
                                                   peer_dev->priv.eswitch,
                                                   spec, vport->vport);

                flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                           spec, &flow_act, &dest, 1);
                        err = PTR_ERR(flow);
                        goto add_vf_flow_err;
                flows[vport->index] = flow;

        esw->fdb_table.offloads.peer_miss_rules = flows;

        mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
                if (!flows[vport->index])
                mlx5_del_flow_rules(flows[vport->index]);

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                mlx5_del_flow_rules(flows[vport->index]);

        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
                mlx5_del_flow_rules(flows[vport->index]);

        esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
        struct mlx5_flow_handle **flows;
        struct mlx5_vport *vport;

        flows = esw->fdb_table.offloads.peer_miss_rules;

        mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
                mlx5_del_flow_rules(flows[vport->index]);

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                mlx5_del_flow_rules(flows[vport->index]);

        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
                mlx5_del_flow_rules(flows[vport->index]);

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
        dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
                              outer_headers.dmac_47_16);

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = esw->manager_vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);

        esw->fdb_table.offloads.miss_rule_uni = flow_rule;

        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
        dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
                              outer_headers.dmac_47_16);

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
                mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);

        esw->fdb_table.offloads.miss_rule_multi = flow_rule;

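/* Restore rules match a chain-restore tag in reg_c_0, copy reg_c_1
 * into reg_b via the restore_copy_hdr_id modify header, set the tag as
 * flow_tag and forward the packet to the offloads table.
 */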
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
        struct mlx5_flow_context *flow_context;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_spec *spec;

        if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
                return ERR_PTR(-EOPNOTSUPP);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
        MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                 ESW_REG_C0_USER_DATA_METADATA_MASK);
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
        MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                          MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

        flow_context = &spec->flow_context;
        flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
        flow_context->flow_tag = tag;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = esw->offloads.ft_offloads;

        flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

        if (IS_ERR(flow_rule))
                         "Failed to create restore rule for tag: %d, err(%d)\n",
                         tag, (int)PTR_ERR(flow_rule));

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
        void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                MLX5_SET(create_flow_group_in, flow_group_in,
                         match_criteria_enable,
                         MLX5_MATCH_MISC_PARAMETERS_2 | match_params);

                MLX5_SET(fte_match_param, match_criteria,
                         misc_parameters_2.metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                MLX5_SET(create_flow_group_in, flow_group_in,
                         match_criteria_enable,
                         MLX5_MATCH_MISC_PARAMETERS | match_params);

                MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                                 misc_parameters.source_port);

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
        struct mlx5_vport_tbl_attr attr;
        struct mlx5_vport *vport;

        mlx5_esw_for_each_vport(esw, i, vport) {
                attr.vport = vport->vport;
                attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
                mlx5_esw_vporttbl_put(esw, &attr);

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
        struct mlx5_vport_tbl_attr attr;
        struct mlx5_flow_table *fdb;
        struct mlx5_vport *vport;

        mlx5_esw_for_each_vport(esw, i, vport) {
                attr.vport = vport->vport;
                attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
                fdb = mlx5_esw_vporttbl_get(esw, &attr);

        esw_vport_tbl_put(esw);
        return PTR_ERR(fdb);

#define fdb_modify_header_fwd_to_table_supported(esw) \
        (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))

static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
        struct mlx5_core_dev *dev = esw->dev;

        if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
                *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

        if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
            esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
                *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
        } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
                *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
        } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
                /* Disabled when the TTL workaround is needed, e.g. when
                 * ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig.
                 */
                         "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
                *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                esw_info(dev, "Supported tc chains and prios offload\n");

        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;

esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *nf_ft, *ft;
        struct mlx5_chains_attr attr = {};
        struct mlx5_fs_chains *chains;

        esw_init_chains_offload_flags(esw, &attr.flags);
        attr.ns = MLX5_FLOW_NAMESPACE_FDB;
        attr.fs_base_prio = FDB_TC_OFFLOAD;
        attr.max_grp_num = esw->params.large_group_num;
        attr.default_ft = miss_fdb;
        attr.mapping = esw->offloads.reg_c0_obj_pool;

        chains = mlx5_chains_create(dev, &attr);
        if (IS_ERR(chains)) {
                err = PTR_ERR(chains);
                esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);

        mlx5_chains_print_info(chains);

        esw->fdb_table.offloads.esw_chains_priv = chains;

        /* Create tc_end_ft, the ft chain that is always created */
        nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
        if (IS_ERR(nf_ft)) {
                err = PTR_ERR(nf_ft);

        /* Always open the root for fast path */
        ft = mlx5_chains_get_table(chains, 0, 1, 0);

        /* Open level 1 for split fdb rules now if prios aren't supported */
        if (!mlx5_chains_prios_supported(chains)) {
                err = esw_vport_tbl_get(esw);

        mlx5_chains_set_end_ft(chains, nf_ft);

        mlx5_chains_put_table(chains, 0, 1, 0);
        mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
        mlx5_chains_destroy(chains);
        esw->fdb_table.offloads.esw_chains_priv = NULL;

esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
        if (!mlx5_chains_prios_supported(chains))
                esw_vport_tbl_put(esw);
        mlx5_chains_put_table(chains, 0, 1, 0);
        mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
        mlx5_chains_destroy(chains);

#else /* CONFIG_MLX5_CLS_ACT */

esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)

esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)

#endif

esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
                               struct mlx5_flow_table *fdb,
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;

        memset(flow_group_in, 0, inlen);

        mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);

        if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
            MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
                MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                                 misc_parameters.source_eswitch_owner_vhca_id);
                MLX5_SET(create_flow_group_in, flow_group_in,
                         source_eswitch_owner_vhca_id_valid, 1);

        /* See comment at table_size calculation */
        count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
                esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);

        esw->fdb_table.offloads.send_to_vport_grp = g;

esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
                                    struct mlx5_flow_table *fdb,
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;

        if (!esw_src_port_rewrite_supported(esw))

        memset(flow_group_in, 0, inlen);

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS_2);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET(fte_match_param, match_criteria,
                 misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_mask());
        MLX5_SET(fte_match_param, match_criteria,
                 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
        MLX5_SET(create_flow_group_in, flow_group_in,
                 end_flow_index, *ix + esw->total_vports - 1);
        *ix += esw->total_vports;

        g = mlx5_create_flow_group(fdb, flow_group_in);
                         "Failed to create send-to-vport meta flow group err(%d)\n", err);
                goto send_vport_meta_err;
        esw->fdb_table.offloads.send_to_vport_meta_grp = g;

send_vport_meta_err:

esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
                               struct mlx5_flow_table *fdb,
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;

        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))

        memset(flow_group_in, 0, inlen);

        mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

        if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                match_criteria = MLX5_ADDR_OF(create_flow_group_in,

                MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                                 misc_parameters.source_eswitch_owner_vhca_id);

                MLX5_SET(create_flow_group_in, flow_group_in,
                         source_eswitch_owner_vhca_id_valid, 1);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
                 *ix + esw->total_vports - 1);
        *ix += esw->total_vports;

        g = mlx5_create_flow_group(fdb, flow_group_in);
                esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);

        esw->fdb_table.offloads.peer_miss_grp = g;

esw_create_miss_group(struct mlx5_eswitch *esw,
                      struct mlx5_flow_table *fdb,
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;

        memset(flow_group_in, 0, inlen);

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
                            outer_headers.dmac_47_16);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
                 *ix + MLX5_ESW_MISS_FLOWS);

        g = mlx5_create_flow_group(fdb, flow_group_in);
                esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);

        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);

        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

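/* Create the slow-path FDB together with its flow groups (send-to-vport,
 * send-to-vport metadata, peer miss and the final match-all miss group),
 * the TC-miss table and the tc chains infrastructure.
 */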
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int table_size, ix = 0, err = 0;
        u32 flags = 0, *flow_group_in;

        esw_debug(esw->dev, "Create offloads FDB Tables\n");

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
                esw_warn(dev, "Failed to get FDB flow namespace\n");

        esw->fdb_table.offloads.ns = root_ns;
        err = mlx5_flow_namespace_set_mode(root_ns,
                                           esw->dev->priv.steering->mode);
                esw_warn(dev, "Failed to set FDB namespace steering mode\n");

        /* To be strictly correct:
         *      MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
         * should be:
         *      esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
         *      peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
         * but as the peer device might not be in switchdev mode it's not
         * possible. We use the fact that by default FW sets max vfs and max
         * sfs to the same value on both devices. If this needs to be changed
         * in the future, note that the peer miss group should also be created
         * based on the number of total vports of the peer (currently it also
         * uses esw->total_vports).
         */
        table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
                     esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;

        /* Create the slow path fdb with encap set, so further table instances
         * can be created at run time while VFs are probed, if the FW allows that.
         */
        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
                          MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

        ft_attr.flags = flags;
        ft_attr.max_fte = table_size;
        ft_attr.prio = FDB_SLOW_PATH;

        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
        esw->fdb_table.offloads.slow_fdb = fdb;

        /* Create an empty TC-miss managed table. This allows plugging in the
         * following priorities without directly exposing their level-0 table
         * to eswitch_offloads, passing it instead as miss_fdb to the following
         * call to esw_chains_create().
         */
        memset(&ft_attr, 0, sizeof(ft_attr));
        ft_attr.prio = FDB_TC_MISS;
        esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
                err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
                esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
                goto tc_miss_table_err;

        err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
                esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
                goto fdb_chains_err;

        err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
                goto send_vport_err;

        err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
                goto send_vport_meta_err;

        err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);

        err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);

        kvfree(flow_group_in);

        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);

        if (esw->fdb_table.offloads.send_to_vport_meta_grp)
                mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);

        esw_chains_destroy(esw, esw_chains(esw));

        mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);

        mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));

        /* Holds true only as long as DMFS is the default */
        mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);

        kvfree(flow_group_in);

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
        if (!mlx5_eswitch_get_slow_fdb(esw))

        esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        if (esw->fdb_table.offloads.send_to_vport_meta_grp)
                mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        esw_chains_destroy(esw, esw_chains(esw));

        mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
        mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
        /* Holds true only as long as DMFS is the default */
        mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
                                     MLX5_FLOW_STEERING_MODE_DMFS);
        atomic64_set(&esw->user_count, 0);

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
        nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
        if (mlx5e_tc_int_port_supported(esw))
                nvports += MLX5E_TC_MAX_INT_PORT_NUM;

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_namespace *ns;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");

        ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
                          MLX5_ESW_FT_OFFLOADS_DROP_RULE;

        ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);

        esw->offloads.ft_offloads = ft_offloads;

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;

        nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);

        /* create vport rx group */
        mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);

        esw->offloads.vport_rx_group = g;

        kvfree(flow_group_in);

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);

static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
        /* The ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE
         * (1) for the drop rule, which is placed at the end of the table.
         * So return the total number of vports and int_ports as the rule index.
         */
        return esw_get_nr_ft_offloads_steering_src_ports(esw);

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;

        flow_index = esw_create_vport_rx_drop_rule_index(esw);

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
                mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);

        esw->offloads.vport_rx_drop_group = g;

        kvfree(flow_group_in);

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
        if (esw->offloads.vport_rx_drop_group)
                mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);

mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
                              struct mlx5_flow_spec *spec)
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
                                  struct mlx5_flow_destination *dest)
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
                flow_rule = ERR_PTR(-ENOMEM);

        mlx5_esw_set_spec_source_port(esw, vport, spec);

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));

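/* The catch-all drop rule occupies the extra entry at the end of
 * ft_offloads (see esw_create_vport_rx_drop_rule_index()) and drops
 * any packet no vport rx rule matched.
 */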
2000 static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2002 struct mlx5_flow_act flow_act = {};
2003 struct mlx5_flow_handle *flow_rule;
2005 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
2006 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
2007 &flow_act, NULL, 0);
2008 if (IS_ERR(flow_rule)) {
2010 "fs offloads: Failed to add vport rx drop rule err %ld\n",
2011 PTR_ERR(flow_rule));
2012 return PTR_ERR(flow_rule);
2015 esw->offloads.vport_rx_drop_rule = flow_rule;
2020 static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2022 if (esw->offloads.vport_rx_drop_rule)
2023 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
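/* The restore table created below matches on the user-data bits of reg_c_0
 * and attaches a modify header that copies reg_c_1 into reg_b. As the match
 * and copy below suggest, this is presumably how chain/tunnel context that
 * was stashed in the reg_c registers is restored for packets handed back to
 * the NIC rx path.
 */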
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_OFFLOADS;
	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		esw->mode = MLX5_ESWITCH_LEGACY;
		mlx5_rescan_drivers(esw->dev);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
					   struct mlx5_eswitch_rep *rep,
					   xa_mark_t mark)
{
	bool mark_set;

	/* Copy the mark from vport to its rep */
	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
	if (mark_set)
		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}

static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
	return 0;

insert_err:
	kfree(rep);
	return err;
}
static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}

static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}
static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (ctx->val.vbool)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}

static int esw_port_metadata_get(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
	return 0;
}

static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
				      union devlink_param_value val,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 esw_mode;

	esw_mode = mlx5_eswitch_mode(dev);
	if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-Switch must be disabled or in non-switchdev mode");
		return -EBUSY;
	}
	return 0;
}
static const struct devlink_param esw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
			     "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     esw_port_metadata_get,
			     esw_port_metadata_set,
			     esw_port_metadata_validate),
};
int esw_offloads_init(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_offloads_init_reps(esw);
	if (err)
		return err;

	err = devl_params_register(priv_to_devlink(esw->dev),
				   esw_devlink_params,
				   ARRAY_SIZE(esw_devlink_params));
	if (err)
		goto err_params;

	return 0;

err_params:
	esw_offloads_cleanup_reps(esw);
	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	devl_params_unregister(priv_to_devlink(esw->dev),
			       esw_devlink_params,
			       ARRAY_SIZE(esw_devlink_params));
	esw_offloads_cleanup_reps(esw);
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
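/* Rep types are loaded in ascending order and unloaded in descending order
 * (see the loops below), so a higher rep type such as REP_IB can presumably
 * rely on the lower REP_ETH rep of the same vport already being loaded.
 */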
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
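/* Shared (single) FDB support for LAG: esw_set_slave_root_fdb() below
 * re-points the slave device's FDB root at the master's root flow table via
 * SET_FLOW_TABLE_ROOT with table_eswitch_owner_vhca_id set to the master,
 * and restores the slave's own root table when called with a NULL master.
 */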
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;
	int err;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);

	if (master) {
		ns = mlx5_get_flow_namespace(master,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
	mutex_unlock(&root->chain_lock);

	return err;
}
static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(slave, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = slave->priv.eswitch->manager_vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
					&dest, 1);
	if (IS_ERR(flow_rule))
		err = PTR_ERR(flow_rule);
	else
		vport->egress.offloads.bounce_rule = flow_rule;

	kvfree(spec);
	return err;
}
static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_eswitch *esw = master->priv.eswitch;
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = 1, .prio = 0, .level = 0,
		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
	};
	struct mlx5_flow_namespace *egress_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	struct mlx5_vport *vport;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	egress_ns = mlx5_get_flow_vport_acl_namespace(master,
						      MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						      vport->index);
	if (!egress_ns)
		return -EINVAL;

	if (vport->egress.acl)
		return 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		goto out;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		goto err_group;
	}

	err = __esw_set_master_egress_rule(master, slave, vport, acl);
	if (err)
		goto err_rule;

	vport->egress.acl = acl;
	vport->egress.offloads.bounce_grp = g;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(acl);
out:
	kvfree(flow_group_in);
	return err;
}
static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_cleanup(vport);
}
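/* Wiring both halves together: the slave's FDB root is redirected to the
 * master, and an egress "bounce" ACL rule is installed on the master's
 * manager vport. Per the match in __esw_set_master_egress_rule(), traffic
 * that entered via the slave's uplink is presumably forwarded back to the
 * slave's eswitch manager after traversing the shared FDB.
 */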
int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw)
{
	int err;

	err = esw_set_slave_root_fdb(master_esw->dev,
				     slave_esw->dev);
	if (err)
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev);
	if (err)
		goto err_acl;

	return err;

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);

	return err;
}

void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_unset_master_egress_rule(master_esw->dev);
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
}
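/* Eswitch pairing is driven by devcom events: a PAIR event installs peer
 * miss rules and notifies every loaded rep on both eswitches, while UNPAIR
 * tears the same state down in reverse order.
 */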
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;

	mlx5_esw_for_each_rep(esw, i, rep) {
		rep_type = NUM_REP_TYPES;
		while (rep_type--) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event)
				ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
		}
	}
}
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
				     struct mlx5_eswitch *peer_esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
	esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	mlx5_esw_for_each_rep(esw, i, rep) {
		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event) {
				err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
				if (err)
					goto err_out;
			}
		}
	}

	return 0;

err_out:
	mlx5_esw_offloads_unpair(esw, peer_esw);
	return err;
}
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
		mlx5_esw_offloads_unpair(peer_esw, esw);
		mlx5_esw_offloads_unpair(esw, peer_esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw, peer_esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	return true;
}
#define MLX5_ESW_METADATA_RSVD_UPLINK 1

/* Share the same metadata for uplinks. This is fine because:
 * (a) In shared FDB mode (LAG) both uplinks are treated the
 *     same and tagged with the same metadata.
 * (b) In non-shared FDB mode, packets from physical port0
 *     cannot hit the eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
	return MLX5_ESW_METADATA_RSVD_UPLINK;
}

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	/* Reserve 0xf for internal port offload */
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = mlx5_get_dev_index(esw->dev);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (2-4095) for all PFs */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
			     MLX5_ESW_METADATA_RSVD_UPLINK + 1,
			     vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
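/* Worked example of the encoding above (assuming ESW_PFNUM_BITS == 4 and
 * ESW_VPORT_BITS == 12): a vport on PF 1 that drew ida id 5 gets metadata
 * (1 << 12) | 5 == 0x1005, and only the low 12 bits (0x005) are returned to
 * the ida on free.
 */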
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK)
		vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
	else
		vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);

	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	if (vport->vport == MLX5_VPORT_UPLINK)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int ret;

	if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
		return 0;

	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (ret)
		return ret;

	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
			mlx5_esw_offloads_rep_load(esw, rep->vport);
	}

	return 0;
}
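/* Steering objects are created bottom-up below: indirection tables, uplink
 * ACLs, the offloads table, the restore table, the FDB, and finally the
 * vport rx group plus the rx drop group/rule. The error labels unwind them
 * in exact reverse order.
 */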
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_create_vport_rx_drop_group(esw);
	if (err)
		goto create_rx_drop_fg_err;

	err = esw_create_vport_rx_drop_rule(esw);
	if (err)
		goto create_rx_drop_rule_err;

	return 0;

create_rx_drop_rule_err:
	esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
	esw_destroy_vport_rx_group(esw);
create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_drop_rule(esw);
	esw_destroy_vport_rx_drop_group(esw);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	struct devlink *devlink;
	bool host_pf_disabled;
	u16 new_num_vfs;
	int err;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	devlink = priv_to_devlink(esw->dev);
	devl_lock(devlink);
	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err) {
			devl_unlock(devlink);
			return;
		}
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
	devl_unlock(devlink);
}
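/* ESW_FUNCTIONS_CHANGED notifications arrive in atomic context, so the
 * notifier below only allocates (GFP_ATOMIC) and queues a work item; the
 * actual vport load/unload runs from the work queue, where it may sleep and
 * take the devlink lock.
 */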
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non local controller with non zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
	/* Local controller is always valid */
	if (controller == 0)
		return true;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	/* External host number starts with zero in device */
	return (controller == esw->offloads.host_number + 1);
}
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	u64 mapping_id;
	int err;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
						sizeof(struct mlx5_mapped_obj),
						ESW_REG_C0_USER_DATA_METADATA_MASK,
						true);

	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_LEGACY;

	/* If changing from switchdev to legacy mode without sriov enabled,
	 * no need to create legacy fdb.
	 */
	if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
		return 0;

	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");

	return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
	struct net *devl_net, *netdev_net;
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
	devl_net = devlink_net(devlink);

	return net_eq(devl_net, netdev_net);
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
	    !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
		return -EPERM;
	}

	mlx5_lag_disable_change(esw->dev);
	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		goto enable_lag;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	mlx5_eswitch_disable_locked(esw);
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't change mode while devlink traps are active");
			err = -EOPNOTSUPP;
			goto unlock;
		}
		err = esw_offloads_start(esw, extack);
	} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		err = esw_offloads_stop(esw, extack);
		mlx5_rescan_drivers(esw->dev);
	} else {
		err = -EINVAL;
	}

unlock:
	mlx5_esw_unlock(esw);
enable_lag:
	mlx5_lag_enable_change(esw->dev);
	return err;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_read(&esw->mode_lock);
	err = esw_mode_to_devlink(esw->mode, mode);
	up_read(&esw->mode_lock);
	return err;
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	return 0;

revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
			err = 0;
			goto out;
		}

		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (err)
		goto out;

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

out:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_read(&esw->mode_lock);
	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
	up_read(&esw->mode_lock);
	return err;
}
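/* Encap blocking: while num_block_encap is non-zero,
 * mlx5_devlink_eswitch_encap_mode_set() refuses to change the encap mode
 * (the extack there mentions IPsec SAs/policies), and conversely
 * mlx5_eswitch_block_encap() below fails when encap is already enabled in
 * switchdev mode. The two paths appear to be mutually exclusive by design.
 */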
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_eswitch *esw;

	devl_lock(devlink);
	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw)) {
		devl_unlock(devlink);
		/* Failure means no eswitch => not possible to change encap */
		return true;
	}

	down_write(&esw->mode_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		up_write(&esw->mode_lock);
		devl_unlock(devlink);
		return false;
	}

	esw->offloads.num_block_encap++;
	up_write(&esw->mode_lock);
	devl_unlock(devlink);
	return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return;

	down_write(&esw->mode_lock);
	esw->offloads.num_block_encap--;
	up_write(&esw->mode_lock);
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->offloads.num_block_encap) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when IPsec SA and/or policies are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_read(&esw->mode_lock);
	*encap = esw->offloads.encap;
	up_read(&esw->mode_lock);
	return 0;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
			rep->esw = esw;
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_each_rep(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
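/* SF port bring-up below: enable the vport, register its devlink port, then
 * load the rep. The error labels here and the teardown in
 * mlx5_esw_offloads_sf_vport_disable() mirror this in reverse order.
 */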
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}
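/* The vhca_map xarray populated above provides the reverse
 * vhca_id -> vport_num lookup below, for callers that only know a
 * function's vhca_id.
 */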
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}

u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);

static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	ether_addr_copy(hw_addr, vport->info.mac);
	*hw_addr_len = ETH_ALEN;
	mutex_unlock(&esw->state_lock);
	return 0;
}
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}

	return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
static struct mlx5_vport *
mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
{
	u16 vport_num;

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return ERR_PTR(-EOPNOTSUPP);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return ERR_PTR(-EOPNOTSUPP);

	return mlx5_eswitch_get_vport(esw, vport_num);
}
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (!MLX5_CAP_GEN(esw->dev, migration)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
		return err;
	}

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		*is_enabled = vport->info.mig_enabled;
		err = 0;
	}
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	void *query_ctx;
	void *hca_caps;
	int err = -EOPNOTSUPP;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (!MLX5_CAP_GEN(esw->dev, migration)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
		return err;
	}

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (!vport->enabled) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
		goto out;
	}

	if (vport->info.mig_enabled == enable) {
		err = 0;
		goto out;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL_2);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
		goto out_free;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);

	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
		goto out_free;
	}

	vport->info.mig_enabled = enable;

out_free:
	kfree(query_ctx);
out:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		*is_enabled = vport->info.roce_enabled;
		err = 0;
	}
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	void *query_ctx;
	void *hca_caps;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}
	vport_num = vport->vport;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
		goto out;
	}

	if (vport->info.roce_enabled == enable) {
		err = 0;
		goto out;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
					    MLX5_CAP_GENERAL);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
		goto out_free;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);

	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
		goto out_free;
	}

	vport->info.roce_enabled = enable;

out_free:
	kfree(query_ctx);
out:
	mutex_unlock(&esw->state_lock);
	return err;
}