/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"
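
/* Iterate over all vport representors registered on the eswitch. */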
#define mlx5_esw_for_each_rep(esw, i, rep) \
        xa_for_each(&((esw)->offloads.vport_reps), i, rep)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
        .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
        .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
        .flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
                                                     u16 vport_num)
{
        return xa_load(&esw->offloads.vport_reps, vport_num);
}

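/* Set the flow_context flow source hint (internal port, uplink or local
 * vport) according to the rule's ingress port, when the device supports it.
 */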
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_esw_flow_attr *attr)
{
        if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
                return;

        if (attr->int_port) {
                spec->flow_context.flow_source =
                        mlx5e_tc_int_port_get_flow_source(attr->int_port);
                return;
        }

        spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
                                         MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
                                         MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Strictly, only the upper 16 bits of reg c0 need to be cleared, but since
 * the lower 16 bits are not used by the subsequent processing either, clear
 * the whole register for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                void *misc2;

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

                if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
                        spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
        }
}

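/* Match on the rule's source port, either via the vport metadata in
 * reg_c_0 or, without metadata, on source_port (plus the owner vhca_id
 * on merged eswitches).
 */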
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_flow_attr *attr,
                                  struct mlx5_eswitch *src_esw,
                                  u16 vport)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        u32 metadata;
        void *misc2;
        void *misc;

        /* Use metadata matching because vport is not represented by a single
         * VHCA in dual-port RoCE mode, and matching on source vport may fail.
         */
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                if (mlx5_esw_indir_table_decap_vport(attr))
                        vport = mlx5_esw_indir_table_decap_vport(attr);

                if (!attr->chain && esw_attr && esw_attr->int_port)
                        metadata =
                                mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
                else
                        metadata =
                                mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);

                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                        MLX5_SET(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id,
                                 MLX5_CAP_GEN(src_esw->dev, vhca_id));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                        MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                         source_eswitch_owner_vhca_id);

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        }
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
                      struct mlx5_flow_attr *attr)
{
        struct mlx5_flow_table *ft;

        if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
                return -EOPNOTSUPP;

        ft = mlx5_esw_indir_table_get(esw, attr,
                                      mlx5_esw_indir_table_decap_vport(attr), true);
        return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
                        struct mlx5_flow_attr *attr)
{
        if (mlx5_esw_indir_table_decap_vport(attr))
                mlx5_esw_indir_table_put(esw,
                                         mlx5_esw_indir_table_decap_vport(attr),
                                         true);
}

static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
                   struct mlx5e_meter_attr *meter,
                   int i)
{
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
        dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
        dest[i].range.min = 0;
        dest[i].range.max = meter->params.mtu;
        dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
        dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);

        return 0;
}

static void
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
                       struct mlx5_flow_act *flow_act,
                       u32 sampler_id,
                       int i)
{
        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
        dest[i].sampler_id = sampler_id;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
                  struct mlx5_flow_act *flow_act,
                  struct mlx5_eswitch *esw,
                  struct mlx5_flow_attr *attr,
                  int i)
{
        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = attr->dest_ft;

        if (mlx5_esw_indir_table_decap_vport(attr))
                return esw_setup_decap_indir(esw, attr);
        return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                      struct mlx5_fs_chains *chains, int i)
{
        if (mlx5_chains_ignore_flow_level_supported(chains))
                flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                         struct mlx5_eswitch *esw, int i)
{
        if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
                flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
                     struct mlx5_flow_act *flow_act,
                     struct mlx5_fs_chains *chains,
                     u32 chain, u32 prio, u32 level,
                     int i)
{
        struct mlx5_flow_table *ft;

        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        ft = mlx5_chains_get_table(chains, chain, prio, level);
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = ft;
        return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
                                     int from, int to)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        int i;

        for (i = from; i < to; i++)
                if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
                        mlx5_chains_put_table(chains, 0, 1, 0);
                else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
                                                     esw_attr->dests[i].mdev))
                        mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
                                                 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
        int i;

        for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
                if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
                        return true;
        return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
                                 struct mlx5_flow_act *flow_act,
                                 struct mlx5_eswitch *esw,
                                 struct mlx5_fs_chains *chains,
                                 struct mlx5_flow_attr *attr,
                                 int *i)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        int err;

        if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
                return -EOPNOTSUPP;

        /* flow steering cannot handle more than one dest with the same ft
         * in a single flow
         */
        if (esw_attr->out_count - esw_attr->split_count > 1)
                return -EOPNOTSUPP;

        err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
        if (err)
                return err;

        if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
        }
        (*i)++;

        return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
                                               struct mlx5_flow_attr *attr)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        bool result = false;
        int i;

        /* The indirect table is supported only for flows whose in_port is the
         * uplink and whose destinations are vports on the same eswitch as the
         * uplink; return false in case at least one of the destinations
         * doesn't meet this criterion.
         */
        for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
                if (esw_attr->dests[i].rep &&
                    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
                                                esw_attr->dests[i].mdev)) {
                        result = true;
                } else {
                        result = false;
                        break;
                }
        }
        return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
                      struct mlx5_flow_act *flow_act,
                      struct mlx5_eswitch *esw,
                      struct mlx5_flow_attr *attr,
                      bool ignore_flow_lvl,
                      int *i)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        int j, err;

        if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
                return -EOPNOTSUPP;

        for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
                if (ignore_flow_lvl)
                        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
                dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

                dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
                                                       esw_attr->dests[j].rep->vport, false);
                if (IS_ERR(dest[*i].ft)) {
                        err = PTR_ERR(dest[*i].ft);
                        goto err_indir_tbl_get;
                }
        }

        if (mlx5_esw_indir_table_decap_vport(attr)) {
                err = esw_setup_decap_indir(esw, attr);
                if (err)
                        goto err_indir_tbl_get;
        }

        return 0;

err_indir_tbl_get:
        esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
        return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

        esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
        esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
        mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
                     int attr_idx, int dest_idx, bool pkt_reformat)
{
        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
                dest[dest_idx].vport.vhca_id =
                        MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
                if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
                    mlx5_lag_is_mpesw(esw->dev))
                        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
        }
        if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
                if (pkt_reformat) {
                        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                        flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
                }
                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
                dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
        }
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
                      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
                      int i)
{
        int j;

        for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
                esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
        return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
        return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
               mlx5_eswitch_vport_match_metadata_enabled(esw) &&
               MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

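/* Translate the flow attributes into an array of flow destinations,
 * dispatching on the attribute flags (slow path, sampler, accept, MTU
 * check, indirect table, chain source port rewrite) or on the vport /
 * flow table / chain destinations. *i returns the number of entries used.
 */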
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
                struct mlx5_flow_act *flow_act,
                struct mlx5_eswitch *esw,
                struct mlx5_flow_attr *attr,
                struct mlx5_flow_spec *spec,
                int *i)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        int err = 0;

        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
            esw_src_port_rewrite_supported(esw))
                attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

        if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
                esw_setup_slow_path_dest(dest, flow_act, esw, *i);
                (*i)++;
                goto out;
        }

        if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
                esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
                (*i)++;
        } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
                esw_setup_accept_dest(dest, flow_act, chains, *i);
                (*i)++;
        } else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
                err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
                (*i)++;
        } else if (esw_is_indir_table(esw, attr)) {
                err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
        } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
                err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
        } else {
                *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

                if (attr->dest_ft) {
                        err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
                        (*i)++;
                } else if (attr->dest_chain) {
                        err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
                                                   1, 0, *i);
                        (*i)++;
                }
        }

out:
        return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
                  struct mlx5_flow_attr *attr)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);

        if (attr->dest_ft) {
                esw_cleanup_decap_indir(esw, attr);
        } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
                if (attr->dest_chain)
                        esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
                else if (esw_is_indir_table(esw, attr))
                        esw_cleanup_indir_table(esw, attr);
                else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
                        esw_cleanup_chain_src_port_rewrite(esw, attr);
        }
}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
        struct mlx5e_flow_meter_handle *meter;

        meter = attr->meter_attr.meter;
        flow_act->exe_aso.type = attr->exe_aso_type;
        flow_act->exe_aso.object_id = meter->obj_id;
        flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
        flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
        /* use metadata reg 5 for packet color */
        flow_act->exe_aso.return_reg_id = 5;
}

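/* Build and install an offloaded FDB rule described by @spec and @attr.
 * For split (mirror) actions the rule goes to the per-vport table,
 * otherwise to the chain/prio table or the attribute's flow table.
 */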
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_flow_attr *attr)
{
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        bool split = !!(esw_attr->split_count);
        struct mlx5_vport_tbl_attr fwd_attr;
        struct mlx5_flow_destination *dest;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *fdb;
        int i = 0;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return ERR_PTR(-EOPNOTSUPP);

        dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
        if (!dest)
                return ERR_PTR(-ENOMEM);

        flow_act.action = attr->action;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
                flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
                flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
                flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
                if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
                        flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
                        flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
                        flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
                }
        }

        mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                int err;

                err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
                if (err) {
                        rule = ERR_PTR(err);
                        goto err_create_goto_table;
                }
        }

        if (esw_attr->decap_pkt_reformat)
                flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter_id = mlx5_fc_id(attr->counter);
                i++;
        }

        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
        if (attr->inner_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_hdr = attr->modify_hdr;

        if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
            attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
                esw_setup_meter(attr, &flow_act);

        if (split) {
                fwd_attr.chain = attr->chain;
                fwd_attr.prio = attr->prio;
                fwd_attr.vport = esw_attr->in_rep->vport;
                fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

                fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
        } else {
                if (attr->chain || attr->prio)
                        fdb = mlx5_chains_get_table(chains, attr->chain,
                                                    attr->prio, 0);
                else
                        fdb = attr->ft;

                if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
                        mlx5_eswitch_set_rule_source_port(esw, spec, attr,
                                                          esw_attr->in_mdev->priv.eswitch,
                                                          esw_attr->in_rep->vport);
        }
        if (IS_ERR(fdb)) {
                rule = ERR_CAST(fdb);
                goto err_esw_get;
        }

        if (!i) {
                kfree(dest);
                dest = NULL;
        }

        if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
                rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
                                                     &flow_act, dest, i);
        else
                rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                atomic64_inc(&esw->offloads.num_flows);

        kfree(dest);
        return rule;

err_add_rule:
        if (split)
                mlx5_esw_vporttbl_put(esw, &fwd_attr);
        else if (attr->chain || attr->prio)
                mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
        esw_cleanup_dests(esw, attr);
err_create_goto_table:
        kfree(dest);
        return rule;
}

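/* Add the fast-path half of a split (mirror) rule: the mirror
 * destinations plus a forward to the per-vport table that holds the
 * rest of the actions.
 */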
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_spec *spec,
                          struct mlx5_flow_attr *attr)
{
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        struct mlx5_vport_tbl_attr fwd_attr;
        struct mlx5_flow_destination *dest;
        struct mlx5_flow_table *fast_fdb;
        struct mlx5_flow_table *fwd_fdb;
        struct mlx5_flow_handle *rule;
        int i, err = 0;

        dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
        if (!dest)
                return ERR_PTR(-ENOMEM);

        fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
        if (IS_ERR(fast_fdb)) {
                rule = ERR_CAST(fast_fdb);
                goto err_get_fast;
        }

        fwd_attr.chain = attr->chain;
        fwd_attr.prio = attr->prio;
        fwd_attr.vport = esw_attr->in_rep->vport;
        fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
        fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
        if (IS_ERR(fwd_fdb)) {
                rule = ERR_CAST(fwd_fdb);
                goto err_get_fwd;
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        for (i = 0; i < esw_attr->split_count; i++) {
                if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
                        /* Source port rewrite (forwarding to an OVS internal port or a
                         * stacked device) isn't supported in rules with a split action.
                         */
                        err = -EOPNOTSUPP;
                else
                        esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

                if (err) {
                        rule = ERR_PTR(err);
                        goto err_chain_src_rewrite;
                }
        }
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = fwd_fdb;
        i++;

        mlx5_eswitch_set_rule_source_port(esw, spec, attr,
                                          esw_attr->in_mdev->priv.eswitch,
                                          esw_attr->in_rep->vport);

        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

        if (IS_ERR(rule)) {
                i = esw_attr->split_count;
                goto err_chain_src_rewrite;
        }

        atomic64_inc(&esw->offloads.num_flows);

        kfree(dest);
        return rule;
err_chain_src_rewrite:
        mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
        mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
        kfree(dest);
        return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
                        struct mlx5_flow_handle *rule,
                        struct mlx5_flow_attr *attr,
                        bool fwd_rule)
{
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_fs_chains *chains = esw_chains(esw);
        bool split = (esw_attr->split_count > 0);
        struct mlx5_vport_tbl_attr fwd_attr;
        int i;

        mlx5_del_flow_rules(rule);

        if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
                /* unref the term table */
                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
                        if (esw_attr->dests[i].termtbl)
                                mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
                }
        }

        atomic64_dec(&esw->offloads.num_flows);

        if (fwd_rule || split) {
                fwd_attr.chain = attr->chain;
                fwd_attr.prio = attr->prio;
                fwd_attr.vport = esw_attr->in_rep->vport;
                fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
        }

        if (fwd_rule) {
                mlx5_esw_vporttbl_put(esw, &fwd_attr);
                mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
        } else {
                if (split)
                        mlx5_esw_vporttbl_put(esw, &fwd_attr);
                else if (attr->chain || attr->prio)
                        mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
                esw_cleanup_dests(esw, attr);
        }
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_flow_attr *attr)
{
        __mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_flow_attr *attr)
{
        __mlx5_eswitch_del_rule(esw, rule, attr, true);
}

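/* Add a rule matching packets sent from the given SQ of @from_esw and
 * forward them to @rep's vport on @on_esw, so that representor transmit
 * traffic reaches its destination through the slow-path FDB.
 */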
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
                                    struct mlx5_eswitch *from_esw,
                                    struct mlx5_eswitch_rep *rep,
                                    u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;
        u16 vport;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

        /* source vport is the esw manager */
        vport = from_esw->manager_vport;

        if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);

                if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
                        MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
                                 MLX5_CAP_GEN(from_esw->dev, vhca_id));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

                if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
                        MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                         source_eswitch_owner_vhca_id);

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        }

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = rep->vport;
        dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
        dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
            rep->vport == MLX5_VPORT_UPLINK)
                spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
                         PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
        mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
        if (rule)
                mlx5_del_flow_rules(rule);
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
        MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
                 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
        dest.vport.num = vport_num;

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
                         vport_num, PTR_ERR(flow_rule));

        kvfree(spec);
        return flow_rule;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
        return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
               MLX5_FDB_TO_VPORT_REG_C_1;
}

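/* Select which reg_c registers (reg_c_0, and reg_c_1 when loopback is
 * supported) the FW copies from FDB metadata into the vport context.
 */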
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
        u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
        u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
        u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
        u8 curr, wanted;
        int err;

        if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
            !mlx5_eswitch_vport_match_metadata_enabled(esw))
                return 0;

        MLX5_SET(query_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
        err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
        if (err)
                return err;

        curr = MLX5_GET(query_esw_vport_context_out, out,
                        esw_vport_context.fdb_to_vport_reg_c_id);
        wanted = MLX5_FDB_TO_VPORT_REG_C_0;
        if (mlx5_eswitch_reg_c1_loopback_supported(esw))
                wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

        if (enable)
                curr |= wanted;
        else
                curr &= ~wanted;

        MLX5_SET(modify_esw_vport_context_in, min,
                 esw_vport_context.fdb_to_vport_reg_c_id, curr);
        MLX5_SET(modify_esw_vport_context_in, min,
                 field_select.fdb_to_vport_reg_c_id, 1);

        err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
        if (!err) {
                if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
                        esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
                else
                        esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
        }

        return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
                                  struct mlx5_core_dev *peer_dev,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_flow_destination *dest)
{
        void *misc;

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

                MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(peer_dev, vhca_id));

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);
        }

        dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest->vport.num = peer_dev->priv.eswitch->manager_vport;
        dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
        dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
                                               struct mlx5_eswitch *peer_esw,
                                               struct mlx5_flow_spec *spec,
                                               u16 vport)
{
        void *misc;

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
                                                                   vport));
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);
        }
}

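/* Forward traffic that arrives from the peer eswitch and misses in this
 * FDB back to the peer's eswitch manager vport; one rule is installed
 * per peer PF/ECPF/VF (and EC VF) source port.
 */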
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
                                       struct mlx5_core_dev *peer_dev)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle **flows;
        /* total vports is the same for both e-switches */
        int nvports = esw->total_vports;
        struct mlx5_flow_handle *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_vport *vport;
        unsigned long i;
        void *misc;
        int err;

        if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
                return 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        peer_miss_rules_setup(esw, peer_dev, spec, &dest);

        flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
        if (!flows) {
                err = -ENOMEM;
                goto alloc_flows_err;
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            misc_parameters);

        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
                esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
                                                   spec, MLX5_VPORT_PF);

                flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        goto add_pf_flow_err;
                }
                flows[vport->index] = flow;
        }

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
                flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        goto add_ecpf_flow_err;
                }
                flows[vport->index] = flow;
        }

        mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
                esw_set_peer_miss_rule_source_port(esw,
                                                   peer_dev->priv.eswitch,
                                                   spec, vport->vport);

                flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        goto add_vf_flow_err;
                }
                flows[vport->index] = flow;
        }

        if (mlx5_core_ec_sriov_enabled(esw->dev)) {
                mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
                        if (i >= mlx5_core_max_ec_vfs(peer_dev))
                                break;
                        esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
                                                           spec, vport->vport);
                        flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
                                                   spec, &flow_act, &dest, 1);
                        if (IS_ERR(flow)) {
                                err = PTR_ERR(flow);
                                goto add_ec_vf_flow_err;
                        }
                        flows[vport->index] = flow;
                }
        }
        esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
        kvfree(spec);
        return 0;

add_ec_vf_flow_err:
        mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
                if (!flows[vport->index])
                        continue;
                mlx5_del_flow_rules(flows[vport->index]);
        }
add_vf_flow_err:
        mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
                if (!flows[vport->index])
                        continue;
                mlx5_del_flow_rules(flows[vport->index]);
        }
        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                mlx5_del_flow_rules(flows[vport->index]);
        }
add_ecpf_flow_err:
        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
                mlx5_del_flow_rules(flows[vport->index]);
        }
add_pf_flow_err:
        esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
        kvfree(flows);
alloc_flows_err:
        kvfree(spec);
        return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
                                        struct mlx5_core_dev *peer_dev)
{
        u16 peer_index = mlx5_get_dev_index(peer_dev);
        struct mlx5_flow_handle **flows;
        struct mlx5_vport *vport;
        unsigned long i;

        flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
        if (!flows)
                return;

        if (mlx5_core_ec_sriov_enabled(esw->dev)) {
                mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
                        /* The flow for a particular vport could be NULL if the other ECPF
                         * has fewer or no VFs enabled.
                         */
                        if (!flows[vport->index])
                                continue;
                        mlx5_del_flow_rules(flows[vport->index]);
                }
        }

        mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
                mlx5_del_flow_rules(flows[vport->index]);

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                mlx5_del_flow_rules(flows[vport->index]);
        }

        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
                mlx5_del_flow_rules(flows[vport->index]);
        }

        kvfree(flows);
        esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
}

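/* Install the two match-all miss rules, one for unicast and one for
 * multicast dmac, forwarding unmatched packets to the eswitch manager
 * vport.
 */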
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        void *headers_c;
        void *headers_v;
        int err = 0;
        u8 *dmac_c;
        u8 *dmac_v;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                 outer_headers);
        dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
                              outer_headers.dmac_47_16);
        dmac_c[0] = 0x01;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = esw->manager_vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_uni = flow_rule;

        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                 outer_headers);
        dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
                              outer_headers.dmac_47_16);
        dmac_v[0] = 0x01;
        flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
                mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
        kvfree(spec);
        return err;
}

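/* Add a restore rule: match @tag in reg_c_0, copy reg_c_1 to reg_b, set
 * the flow tag to @tag and forward to the offloads table.
 */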
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
        struct mlx5_flow_context *flow_context;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_spec *spec;
        void *misc;

        if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
                return ERR_PTR(-EOPNOTSUPP);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            misc_parameters_2);
        MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                 ESW_REG_C0_USER_DATA_METADATA_MASK);
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            misc_parameters_2);
        MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                          MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

        flow_context = &spec->flow_context;
        flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
        flow_context->flow_tag = tag;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = esw->offloads.ft_offloads;

        flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        kvfree(spec);

        if (IS_ERR(flow_rule))
                esw_warn(esw->dev,
                         "Failed to create restore rule for tag: %d, err(%d)\n",
                         tag, (int)PTR_ERR(flow_rule));

        return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
                                    u32 *flow_group_in,
                                    int match_params)
{
        void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
                                            flow_group_in,
                                            match_criteria);

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                MLX5_SET(create_flow_group_in, flow_group_in,
                         match_criteria_enable,
                         MLX5_MATCH_MISC_PARAMETERS_2 | match_params);

                MLX5_SET(fte_match_param, match_criteria,
                         misc_parameters_2.metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());
        } else {
                MLX5_SET(create_flow_group_in, flow_group_in,
                         match_criteria_enable,
                         MLX5_MATCH_MISC_PARAMETERS | match_params);

                MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                                 misc_parameters.source_port);
        }
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
        struct mlx5_vport_tbl_attr attr;
        struct mlx5_vport *vport;
        unsigned long i;

        attr.chain = 0;
        attr.prio = 1;
        mlx5_esw_for_each_vport(esw, i, vport) {
                attr.vport = vport->vport;
                attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
                mlx5_esw_vporttbl_put(esw, &attr);
        }
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
        struct mlx5_vport_tbl_attr attr;
        struct mlx5_flow_table *fdb;
        struct mlx5_vport *vport;
        unsigned long i;

        attr.chain = 0;
        attr.prio = 1;
        mlx5_esw_for_each_vport(esw, i, vport) {
                attr.vport = vport->vport;
                attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
                fdb = mlx5_esw_vporttbl_get(esw, &attr);
                if (IS_ERR(fdb))
                        goto out;
        }
        return 0;

out:
        esw_vport_tbl_put(esw);
        return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
        (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
        struct mlx5_core_dev *dev = esw->dev;

        if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
                *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

        if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
            esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
                *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
        } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
                *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
        } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
                /* Disabled when the ttl workaround is needed, e.g.
                 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
                 */
                esw_warn(dev,
                         "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
                *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
        } else {
                *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
                esw_info(dev, "Supported tc chains and prios offload\n");
        }

        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

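/* Create the FDB chains infrastructure on top of @miss_fdb: the
 * tc_end_ft chain, the always-open level 0 fast path table and, when
 * priorities are not supported, the per-vport split tables.
 */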
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *nf_ft, *ft;
        struct mlx5_chains_attr attr = {};
        struct mlx5_fs_chains *chains;
        int err;

        esw_init_chains_offload_flags(esw, &attr.flags);
        attr.ns = MLX5_FLOW_NAMESPACE_FDB;
        attr.fs_base_prio = FDB_TC_OFFLOAD;
        attr.max_grp_num = esw->params.large_group_num;
        attr.default_ft = miss_fdb;
        attr.mapping = esw->offloads.reg_c0_obj_pool;

        chains = mlx5_chains_create(dev, &attr);
        if (IS_ERR(chains)) {
                err = PTR_ERR(chains);
                esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
                return err;
        }
        mlx5_chains_print_info(chains);

        esw->fdb_table.offloads.esw_chains_priv = chains;

        /* Create tc_end_ft which is the always created ft chain */
        nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
                                      1, 0);
        if (IS_ERR(nf_ft)) {
                err = PTR_ERR(nf_ft);
                goto nf_ft_err;
        }

        /* Always open the root for fast path */
        ft = mlx5_chains_get_table(chains, 0, 1, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto level_0_err;
        }

        /* Open level 1 for split fdb rules now if prios aren't supported */
        if (!mlx5_chains_prios_supported(chains)) {
                err = esw_vport_tbl_get(esw);
                if (err)
                        goto level_1_err;
        }

        mlx5_chains_set_end_ft(chains, nf_ft);

        return 0;

level_1_err:
        mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
        mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
        mlx5_chains_destroy(chains);
        esw->fdb_table.offloads.esw_chains_priv = NULL;

        return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
        if (!mlx5_chains_prios_supported(chains))
                esw_vport_tbl_put(esw);
        mlx5_chains_put_table(chains, 0, 1, 0);
        mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
        mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
                               struct mlx5_flow_table *fdb,
                               u32 *flow_group_in,
                               int *ix)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;
        int count, err = 0;

        memset(flow_group_in, 0, inlen);

        mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);

        if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
            MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
                MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                                 misc_parameters.source_eswitch_owner_vhca_id);
                MLX5_SET(create_flow_group_in, flow_group_in,
                         source_eswitch_owner_vhca_id_valid, 1);
        }

        /* See comment at table_size calculation */
        count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
        *ix += count;

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

out:
        return err;
}

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
                                    struct mlx5_flow_table *fdb,
                                    u32 *flow_group_in,
                                    int *ix)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;
        int err = 0;

        if (!esw_src_port_rewrite_supported(esw))
                return 0;

        memset(flow_group_in, 0, inlen);

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS_2);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET(fte_match_param, match_criteria,
                 misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_mask());
        MLX5_SET(fte_match_param, match_criteria,
                 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
        MLX5_SET(create_flow_group_in, flow_group_in,
                 end_flow_index, *ix + esw->total_vports - 1);
        *ix += esw->total_vports;

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(esw->dev,
                         "Failed to create send-to-vport meta flow group err(%d)\n", err);
                goto send_vport_meta_err;
        }
        esw->fdb_table.offloads.send_to_vport_meta_grp = g;

        return 0;

send_vport_meta_err:
        return err;
}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
                               struct mlx5_flow_table *fdb,
                               u32 *flow_group_in,
                               int *ix)
{
        int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;
        int err = 0;

        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
                return 0;

        memset(flow_group_in, 0, inlen);

        mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

        if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                match_criteria = MLX5_ADDR_OF(create_flow_group_in,
                                              flow_group_in,
                                              match_criteria);

                MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                                 misc_parameters.source_eswitch_owner_vhca_id);

                MLX5_SET(create_flow_group_in, flow_group_in,
                         source_eswitch_owner_vhca_id_valid, 1);
        }

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
                 *ix + max_peer_ports);
        *ix += max_peer_ports + 1;

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.offloads.peer_miss_grp = g;

out:
        return err;
}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
                      struct mlx5_flow_table *fdb,
                      u32 *flow_group_in,
                      int *ix)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *match_criteria;
        int err = 0;
        u8 *dmac;

        memset(flow_group_in, 0, inlen);

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
                                      match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
                            outer_headers.dmac_47_16);
        dmac[0] = 0x01;

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
                 *ix + MLX5_ESW_MISS_FLOWS);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        return err;
}

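/* Create the slow path FDB together with the TC miss table, the chains
 * infrastructure and the send-to-vport, peer miss and miss flow groups.
 */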
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int table_size, ix = 0, err = 0;
        u32 flags = 0, *flow_group_in;

        esw_debug(esw->dev, "Create offloads FDB Tables\n");

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }
        esw->fdb_table.offloads.ns = root_ns;
        err = mlx5_flow_namespace_set_mode(root_ns,
                                           esw->dev->priv.steering->mode);
        if (err) {
                esw_warn(dev, "Failed to set FDB namespace steering mode\n");
                goto ns_err;
        }

        /* To be strictly correct:
         *      MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
         * should be:
         *      esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
         *      peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
         * but as the peer device might not be in switchdev mode that is not
         * possible. We use the fact that by default FW sets max vfs and max sfs
         * to the same value on both devices. If it needs to be changed in the
         * future, note that the peer miss group should also be created based on
         * the number of total vports of the peer (currently it also uses
         * esw->total_vports).
         */
        table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
                     esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;

        /* create the slow path fdb with encap set, so further table instances
         * can be created at run time while VFs are probed if the FW allows that.
         */
        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
                          MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

        ft_attr.flags = flags;
        ft_attr.max_fte = table_size;
        ft_attr.prio = FDB_SLOW_PATH;

        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.slow_fdb = fdb;

        /* Create empty TC-miss managed table. This allows plugging in following
         * priorities without directly exposing their level 0 table to
         * eswitch_offloads and passing it as miss_fdb to the following call to
         * esw_chains_create().
         */
        memset(&ft_attr, 0, sizeof(ft_attr));
        ft_attr.prio = FDB_TC_MISS;
        esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
                err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
                esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
                goto tc_miss_table_err;
        }

        err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
        if (err) {
                esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
                goto fdb_chains_err;
        }

        err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
        if (err)
                goto send_vport_err;

        err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
        if (err)
                goto send_vport_meta_err;

        err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
        if (err)
                goto peer_miss_err;

        err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
        if (err)
                goto miss_err;

        kvfree(flow_group_in);
        return 0;

miss_err:
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
        if (esw->fdb_table.offloads.send_to_vport_meta_grp)
                mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
        mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
        /* Holds true only as long as DMFS is the default */
        mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
        if (!mlx5_eswitch_get_slow_fdb(esw))
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        if (esw->fdb_table.offloads.send_to_vport_meta_grp)
                mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        esw_chains_destroy(esw, esw_chains(esw));

        mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
        mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
        /* Holds true only as long as DMFS is the default */
        mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
                                     MLX5_FLOW_STEERING_MODE_DMFS);
        atomic64_set(&esw->user_count, 0);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
        int nvports;

        nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
        if (mlx5e_tc_int_port_supported(esw))
                nvports += MLX5E_TC_MAX_INT_PORT_NUM;

        return nvports;
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_namespace *ns;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
                          MLX5_ESW_FT_OFFLOADS_DROP_RULE;
        ft_attr.prio = 1;

        ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        int nvports;
        int err = 0;

        nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
        /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
         * for the drop rule, which is placed at the end of the table.
         * So the rule index equals the total number of vport and int_port
         * entries.
         */
        return esw_get_nr_ft_offloads_steering_src_ports(esw);
}

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        int flow_index;
        int err = 0;

        flow_index = esw_create_vport_rx_drop_rule_index(esw);

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_drop_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
        if (esw->offloads.vport_rx_drop_group)
                mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}

void
mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
                              u16 vport,
                              struct mlx5_flow_spec *spec)
{
        void *misc;

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, vport);

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        }
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
                                  struct mlx5_flow_destination *dest)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        mlx5_esw_set_spec_source_port(esw, vport, spec);

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n",
                         PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}

static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *flow_rule;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
                                        &flow_act, NULL, 0);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev,
                         "fs offloads: Failed to add vport rx drop rule err %ld\n",
                         PTR_ERR(flow_rule));
                return PTR_ERR(flow_rule);
        }

        esw->offloads.vport_rx_drop_rule = flow_rule;

        return 0;
}

static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
        if (esw->offloads.vport_rx_drop_rule)
                mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_vport *vport;
        unsigned long i;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (!mlx5_esw_is_fdb_created(esw))
                return -EOPNOTSUPP;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                mlx5_mode = MLX5_INLINE_MODE_NONE;
                goto out;
        case MLX5_CAP_INLINE_MODE_L2:
                mlx5_mode = MLX5_INLINE_MODE_L2;
                goto out;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                goto query_vports;
        }

query_vports:
        mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
        mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
                if (prev_mlx5_mode != mlx5_mode)
                        return -EINVAL;
                prev_mlx5_mode = mlx5_mode;
        }

out:
        *mode = mlx5_mode;
        return 0;
}

2098 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
2100 struct mlx5_esw_offload *offloads = &esw->offloads;
2102 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2105 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
2106 mlx5_destroy_flow_group(offloads->restore_group);
2107 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
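/* The restore table matches the user-data bits of reg_c_0, and the
 * copy modify-header allocated here moves reg_c_1 into reg_b so the
 * value survives into the NIC RX domain.
 */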
2110 static int esw_create_restore_table(struct mlx5_eswitch *esw)
2112 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2113 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2114 struct mlx5_flow_table_attr ft_attr = {};
2115 struct mlx5_core_dev *dev = esw->dev;
2116 struct mlx5_flow_namespace *ns;
2117 struct mlx5_modify_hdr *mod_hdr;
2118 void *match_criteria, *misc;
2119 struct mlx5_flow_table *ft;
2120 struct mlx5_flow_group *g;
2124 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2127 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
2129 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2133 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2134 if (!flow_group_in) {
2139 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
2140 ft = mlx5_create_flow_table(ns, &ft_attr);
2143 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2148 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2150 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
2153 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2154 ESW_REG_C0_USER_DATA_METADATA_MASK);
2155 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2156 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2157 ft_attr.max_fte - 1);
2158 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2159 MLX5_MATCH_MISC_PARAMETERS_2);
2160 g = mlx5_create_flow_group(ft, flow_group_in);
2163 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
2168 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
2169 MLX5_SET(copy_action_in, modact, src_field,
2170 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
2171 MLX5_SET(copy_action_in, modact, dst_field,
2172 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
2173 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2174 MLX5_FLOW_NAMESPACE_KERNEL, 1,
2176 if (IS_ERR(mod_hdr)) {
2177 err = PTR_ERR(mod_hdr);
2178 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
2183 esw->offloads.ft_offloads_restore = ft;
2184 esw->offloads.restore_group = g;
2185 esw->offloads.restore_copy_hdr_id = mod_hdr;
2187 kvfree(flow_group_in);
2192 mlx5_destroy_flow_group(g);
2194 mlx5_destroy_flow_table(ft);
2196 kvfree(flow_group_in);
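/* Transition the eswitch into offloads mode; on failure revert to
 * legacy mode and rescan drivers. If no inline mode was configured,
 * derive it from the vports (falling back to L2 when they disagree).
 */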
2201 static int esw_offloads_start(struct mlx5_eswitch *esw,
2202 struct netlink_ext_ack *extack)
2206 esw->mode = MLX5_ESWITCH_OFFLOADS;
2207 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
2209 NL_SET_ERR_MSG_MOD(extack,
2210 "Failed setting eswitch to offloads");
2211 esw->mode = MLX5_ESWITCH_LEGACY;
2212 mlx5_rescan_drivers(esw->dev);
2215 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2216 if (mlx5_eswitch_inline_mode_get(esw,
2217 &esw->offloads.inline_mode)) {
2218 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2219 NL_SET_ERR_MSG_MOD(extack,
2220 "Inline mode is different between vports");
2226 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2228 struct mlx5_eswitch_rep *rep;
2232 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
2236 rep->vport = vport->vport;
2237 rep->vport_index = vport->index;
2238 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2239 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2241 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2252 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2253 struct mlx5_eswitch_rep *rep)
2255 xa_erase(&esw->offloads.vport_reps, rep->vport);
2259 static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2261 struct mlx5_eswitch_rep *rep;
2264 mlx5_esw_for_each_rep(esw, i, rep)
2265 mlx5_esw_offloads_rep_cleanup(esw, rep);
2266 xa_destroy(&esw->offloads.vport_reps);
2269 static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
2271 struct mlx5_vport *vport;
2275 xa_init(&esw->offloads.vport_reps);
2277 mlx5_esw_for_each_vport(esw, i, vport) {
2278 err = mlx5_esw_offloads_rep_init(esw, vport);
2285 esw_offloads_cleanup_reps(esw);
2289 static int esw_port_metadata_set(struct devlink *devlink, u32 id,
2290 struct devlink_param_gset_ctx *ctx)
2292 struct mlx5_core_dev *dev = devlink_priv(devlink);
2293 struct mlx5_eswitch *esw = dev->priv.eswitch;
2296 down_write(&esw->mode_lock);
2297 if (mlx5_esw_is_fdb_created(esw)) {
2301 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2306 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2308 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2310 up_write(&esw->mode_lock);
2314 static int esw_port_metadata_get(struct devlink *devlink, u32 id,
2315 struct devlink_param_gset_ctx *ctx)
2317 struct mlx5_core_dev *dev = devlink_priv(devlink);
2319 ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
2323 static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
2324 union devlink_param_value val,
2325 struct netlink_ext_ack *extack)
2327 struct mlx5_core_dev *dev = devlink_priv(devlink);
2330 esw_mode = mlx5_eswitch_mode(dev);
2331 if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
2332 NL_SET_ERR_MSG_MOD(extack,
2333 "E-Switch must either disabled or non switchdev mode");
2339 static const struct devlink_param esw_devlink_params[] = {
2340 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
2341 "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
2342 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
2343 esw_port_metadata_get,
2344 esw_port_metadata_set,
2345 esw_port_metadata_validate),
2348 int esw_offloads_init(struct mlx5_eswitch *esw)
2352 err = esw_offloads_init_reps(esw);
2356 err = devl_params_register(priv_to_devlink(esw->dev),
2358 ARRAY_SIZE(esw_devlink_params));
2365 esw_offloads_cleanup_reps(esw);
2369 void esw_offloads_cleanup(struct mlx5_eswitch *esw)
2371 devl_params_unregister(priv_to_devlink(esw->dev),
2373 ARRAY_SIZE(esw_devlink_params));
2374 esw_offloads_cleanup_reps(esw);
2377 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2378 struct mlx5_eswitch_rep *rep, u8 rep_type)
2380 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2381 REP_LOADED, REP_REGISTERED) == REP_LOADED)
2382 esw->offloads.rep_ops[rep_type]->unload(rep);
2385 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2387 struct mlx5_eswitch_rep *rep;
2390 mlx5_esw_for_each_rep(esw, i, rep)
2391 __esw_offloads_unload_rep(esw, rep, rep_type);
2394 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2396 struct mlx5_eswitch_rep *rep;
2400 rep = mlx5_eswitch_get_rep(esw, vport_num);
2401 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2402 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2403 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
2404 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2412 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2413 for (--rep_type; rep_type >= 0; rep_type--)
2414 __esw_offloads_unload_rep(esw, rep, rep_type);
2418 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2420 struct mlx5_eswitch_rep *rep;
2423 rep = mlx5_eswitch_get_rep(esw, vport_num);
2424 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2425 __esw_offloads_unload_rep(esw, rep, rep_type);
2428 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
2432 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2435 if (vport_num != MLX5_VPORT_UPLINK) {
2436 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
2441 err = mlx5_esw_offloads_rep_load(esw, vport_num);
2447 if (vport_num != MLX5_VPORT_UPLINK)
2448 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2452 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
2454 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2457 mlx5_esw_offloads_rep_unload(esw, vport_num);
2459 if (vport_num != MLX5_VPORT_UPLINK)
2460 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
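/* Shared-FDB (LAG) plumbing: point the slave eswitch's FDB root at the
 * master's FDB via SET_FLOW_TABLE_ROOT, or restore the slave's own root
 * when @master is NULL.
 */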
2463 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
2464 struct mlx5_core_dev *slave)
2466 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2467 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2468 struct mlx5_flow_root_namespace *root;
2469 struct mlx5_flow_namespace *ns;
2472 MLX5_SET(set_flow_table_root_in, in, opcode,
2473 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2474 MLX5_SET(set_flow_table_root_in, in, table_type,
2478 ns = mlx5_get_flow_namespace(master,
2479 MLX5_FLOW_NAMESPACE_FDB);
2480 root = find_root(&ns->node);
2481 mutex_lock(&root->chain_lock);
2482 MLX5_SET(set_flow_table_root_in, in,
2483 table_eswitch_owner_vhca_id_valid, 1);
2484 MLX5_SET(set_flow_table_root_in, in,
2485 table_eswitch_owner_vhca_id,
2486 MLX5_CAP_GEN(master, vhca_id));
2487 MLX5_SET(set_flow_table_root_in, in, table_id,
2490 ns = mlx5_get_flow_namespace(slave,
2491 MLX5_FLOW_NAMESPACE_FDB);
2492 root = find_root(&ns->node);
2493 mutex_lock(&root->chain_lock);
2494 MLX5_SET(set_flow_table_root_in, in, table_id,
2498 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2499 mutex_unlock(&root->chain_lock);
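/* Install a "bounce" rule on the master's egress ACL: traffic arriving
 * from the slave's uplink (matched on source port plus owner vhca_id)
 * is forwarded to the slave's manager vport. Rules are tracked per
 * slave vhca_id in the bounce_rules xarray.
 */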
2504 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
2505 struct mlx5_core_dev *slave,
2506 struct mlx5_vport *vport,
2507 struct mlx5_flow_table *acl)
2509 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
2510 struct mlx5_flow_handle *flow_rule = NULL;
2511 struct mlx5_flow_destination dest = {};
2512 struct mlx5_flow_act flow_act = {};
2513 struct mlx5_flow_spec *spec;
2517 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2521 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2522 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2524 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2525 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);
2527 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2528 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2529 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
2530 source_eswitch_owner_vhca_id);
2532 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2533 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2534 dest.vport.num = slave->priv.eswitch->manager_vport;
2535 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
2536 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
2538 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
2540 if (IS_ERR(flow_rule)) {
2541 err = PTR_ERR(flow_rule);
2543 err = xa_insert(&vport->egress.offloads.bounce_rules,
2544 slave_index, flow_rule, GFP_KERNEL);
2546 mlx5_del_flow_rules(flow_rule);
2553 static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
2554 struct mlx5_flow_namespace *egress_ns,
2555 struct mlx5_vport *vport, size_t count)
2557 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2558 struct mlx5_flow_table_attr ft_attr = {
2559 .max_fte = count, .prio = 0, .level = 0,
2561 struct mlx5_flow_table *acl;
2562 struct mlx5_flow_group *g;
2563 void *match_criteria;
2567 if (vport->egress.acl)
2570 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2574 if (vport->vport || mlx5_core_is_ecpf(esw->dev))
2575 ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
2577 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
2583 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2585 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2586 misc_parameters.source_port);
2587 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2588 misc_parameters.source_eswitch_owner_vhca_id);
2589 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2590 MLX5_MATCH_MISC_PARAMETERS);
2592 MLX5_SET(create_flow_group_in, flow_group_in,
2593 source_eswitch_owner_vhca_id_valid, 1);
2594 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2595 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);
2597 g = mlx5_create_flow_group(acl, flow_group_in);
2603 vport->egress.acl = acl;
2604 vport->egress.offloads.bounce_grp = g;
2605 vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
2606 xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);
2608 kvfree(flow_group_in);
2613 mlx5_destroy_flow_table(acl);
2615 kvfree(flow_group_in);
2619 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
2621 if (!xa_empty(&vport->egress.offloads.bounce_rules))
2623 mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
2624 vport->egress.offloads.bounce_grp = NULL;
2625 mlx5_destroy_flow_table(vport->egress.acl);
2626 vport->egress.acl = NULL;
2629 static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
2630 struct mlx5_core_dev *slave, size_t count)
2632 struct mlx5_eswitch *esw = master->priv.eswitch;
2633 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
2634 struct mlx5_flow_namespace *egress_ns;
2635 struct mlx5_vport *vport;
2638 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2640 return PTR_ERR(vport);
2642 egress_ns = mlx5_get_flow_vport_acl_namespace(master,
2643 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
2648 if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
2651 err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
2655 if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
2658 err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
2665 esw_master_egress_destroy_resources(vport);
2669 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
2670 struct mlx5_core_dev *slave_dev)
2672 struct mlx5_vport *vport;
2674 vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
2675 dev->priv.eswitch->manager_vport);
2677 esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));
2679 if (xa_empty(&vport->egress.offloads.bounce_rules)) {
2680 esw_acl_egress_ofld_cleanup(vport);
2681 xa_destroy(&vport->egress.offloads.bounce_rules);
2685 int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
2686 struct mlx5_eswitch *slave_esw, int max_slaves)
2690 err = esw_set_slave_root_fdb(master_esw->dev,
2695 err = esw_set_master_egress_rule(master_esw->dev,
2696 slave_esw->dev, max_slaves);
2703 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2707 void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
2708 struct mlx5_eswitch *slave_esw)
2710 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2711 esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
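/* devcom pairing: when two eswitches (e.g. the two PFs of a LAG) become
 * peers, the PAIR/UNPAIR events below drive the peer miss rules, the
 * per-rep pair callbacks and the FDB namespace peering.
 */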
2714 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
2715 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2717 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
2718 struct mlx5_eswitch *peer_esw)
2720 const struct mlx5_eswitch_rep_ops *ops;
2721 struct mlx5_eswitch_rep *rep;
2725 mlx5_esw_for_each_rep(esw, i, rep) {
2726 rep_type = NUM_REP_TYPES;
2727 while (rep_type--) {
2728 ops = esw->offloads.rep_ops[rep_type];
2729 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2731 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
2736 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
2737 struct mlx5_eswitch *peer_esw)
2739 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
2740 mlx5e_tc_clean_fdb_peer_flows(esw);
2742 mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
2743 esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
2746 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2747 struct mlx5_eswitch *peer_esw)
2749 const struct mlx5_eswitch_rep_ops *ops;
2750 struct mlx5_eswitch_rep *rep;
2755 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2759 mlx5_esw_for_each_rep(esw, i, rep) {
2760 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2761 ops = esw->offloads.rep_ops[rep_type];
2762 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2764 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2774 mlx5_esw_offloads_unpair(esw, peer_esw);
2778 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2779 struct mlx5_eswitch *peer_esw,
2782 u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
2783 struct mlx5_flow_root_namespace *peer_ns;
2784 u8 idx = mlx5_get_dev_index(esw->dev);
2785 struct mlx5_flow_root_namespace *ns;
2788 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2789 ns = esw->dev->priv.steering->fdb_root_ns;
2792 err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
2796 err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
2798 mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
2802 mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
2803 mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
2809 static int mlx5_esw_offloads_devcom_event(int event,
2813 struct mlx5_eswitch *esw = my_data;
2814 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2815 struct mlx5_eswitch *peer_esw = event_data;
2816 u16 esw_i, peer_esw_i;
2820 peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
2821 esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
2822 esw_paired = !!xa_load(&esw->paired, peer_esw_i);
2825 case ESW_OFFLOADS_DEVCOM_PAIR:
2826 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2827 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2833 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2836 err = mlx5_esw_offloads_pair(esw, peer_esw);
2840 err = mlx5_esw_offloads_pair(peer_esw, esw);
2844 err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
2848 err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
2853 peer_esw->num_peers++;
2854 mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
2857 case ESW_OFFLOADS_DEVCOM_UNPAIR:
2861 peer_esw->num_peers--;
2863 if (!esw->num_peers && !peer_esw->num_peers)
2864 mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
2865 xa_erase(&peer_esw->paired, esw_i);
2866 xa_erase(&esw->paired, peer_esw_i);
2867 mlx5_esw_offloads_unpair(peer_esw, esw);
2868 mlx5_esw_offloads_unpair(esw, peer_esw);
2869 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2876 xa_erase(&esw->paired, peer_esw_i);
2878 mlx5_esw_offloads_unpair(peer_esw, esw);
2880 mlx5_esw_offloads_unpair(esw, peer_esw);
2882 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2884 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
2889 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
2891 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2894 for (i = 0; i < MLX5_MAX_PORTS; i++)
2895 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
2896 mutex_init(&esw->offloads.peer_mutex);
2898 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2901 if (!mlx5_lag_is_supported(esw->dev))
2904 xa_init(&esw->paired);
2905 mlx5_devcom_register_component(devcom,
2906 MLX5_DEVCOM_ESW_OFFLOADS,
2907 mlx5_esw_offloads_devcom_event,
2911 mlx5_devcom_send_event(devcom,
2912 MLX5_DEVCOM_ESW_OFFLOADS,
2913 ESW_OFFLOADS_DEVCOM_PAIR,
2914 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2917 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
2919 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2921 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2924 if (!mlx5_lag_is_supported(esw->dev))
2927 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
2928 ESW_OFFLOADS_DEVCOM_UNPAIR,
2929 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2931 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2932 xa_destroy(&esw->paired);
2935 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2937 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2940 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2941 MLX5_FDB_TO_VPORT_REG_C_0))
2947 #define MLX5_ESW_METADATA_RSVD_UPLINK 1
/* Share the same metadata for both uplinks. This is fine because:
 * (a) In shared FDB mode (LAG) both uplinks are treated the
 *     same and tagged with the same metadata.
 * (b) In non-shared FDB mode, packets from physical port0
 *     cannot hit the eswitch of PF1 and vice versa.
 */
2955 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
2957 return MLX5_ESW_METADATA_RSVD_UPLINK;
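/* Allocate a unique source-port metadata value. For illustration, with
 * the 4-bit PFNUM / 12-bit id split noted below, pf_num = 1 and an IDA
 * id of 2 would yield (1 << 12) | 2 == 0x1002.
 */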
2960 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
2962 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
2963 /* Reserve 0xf for internal port offload */
2964 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
2968 /* Only 4 bits of pf_num */
2969 pf_num = mlx5_get_dev_index(esw->dev);
2970 if (pf_num > max_pf_num)
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
/* Use only a non-zero vport_id (2-4095) for all PFs */
2975 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
2976 MLX5_ESW_METADATA_RSVD_UPLINK + 1,
2977 vport_end_ida, GFP_KERNEL);
2980 id = (pf_num << ESW_VPORT_BITS) | id;
2984 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
2986 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
/* Metadata contains only 12 bits of the actual IDA-allocated id */
2989 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
2992 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
2993 struct mlx5_vport *vport)
2995 if (vport->vport == MLX5_VPORT_UPLINK)
2996 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
2998 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
3000 vport->metadata = vport->default_metadata;
3001 return vport->metadata ? 0 : -ENOSPC;
3004 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
3005 struct mlx5_vport *vport)
3007 if (!vport->default_metadata)
3010 if (vport->vport == MLX5_VPORT_UPLINK)
3013 WARN_ON(vport->metadata != vport->default_metadata);
3014 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
3017 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
3019 struct mlx5_vport *vport;
3022 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3025 mlx5_esw_for_each_vport(esw, i, vport)
3026 esw_offloads_vport_metadata_cleanup(esw, vport);
3029 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
3031 struct mlx5_vport *vport;
3035 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3038 mlx5_esw_for_each_vport(esw, i, vport) {
3039 err = esw_offloads_vport_metadata_setup(esw, vport);
3047 esw_offloads_metadata_uninit(esw);
3052 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
3053 struct mlx5_vport *vport)
3057 err = esw_acl_ingress_ofld_setup(esw, vport);
3061 err = esw_acl_egress_ofld_setup(esw, vport);
3068 esw_acl_ingress_ofld_cleanup(esw, vport);
3073 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
3074 struct mlx5_vport *vport)
3076 esw_acl_egress_ofld_cleanup(vport);
3077 esw_acl_ingress_ofld_cleanup(esw, vport);
3080 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
3082 struct mlx5_vport *vport;
3084 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3086 return PTR_ERR(vport);
3088 return esw_vport_create_offloads_acl_tables(esw, vport);
3091 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
3093 struct mlx5_vport *vport;
3095 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3099 esw_vport_destroy_offloads_acl_tables(esw, vport);
3102 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
3104 struct mlx5_eswitch_rep *rep;
3108 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3111 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3112 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3115 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3119 mlx5_esw_for_each_rep(esw, i, rep) {
3120 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3121 mlx5_esw_offloads_rep_load(esw, rep->vport);
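/* Steering bring-up order: indirection table, uplink offloads ACLs,
 * the ft_offloads table, the restore table, the FDB tables, then the
 * vport RX group and the catch-all RX drop group/rule. Cleanup runs
 * in reverse.
 */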
3127 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3129 struct mlx5_esw_indir_table *indir;
3132 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3133 mutex_init(&esw->fdb_table.offloads.vports.lock);
3134 hash_init(esw->fdb_table.offloads.vports.table);
3135 atomic64_set(&esw->user_count, 0);
3137 indir = mlx5_esw_indir_table_init();
3138 if (IS_ERR(indir)) {
3139 err = PTR_ERR(indir);
3140 goto create_indir_err;
3142 esw->fdb_table.offloads.indir = indir;
3144 err = esw_create_uplink_offloads_acl_tables(esw);
3146 goto create_acl_err;
3148 err = esw_create_offloads_table(esw);
3150 goto create_offloads_err;
3152 err = esw_create_restore_table(esw);
3154 goto create_restore_err;
3156 err = esw_create_offloads_fdb_tables(esw);
3158 goto create_fdb_err;
3160 err = esw_create_vport_rx_group(esw);
3164 err = esw_create_vport_rx_drop_group(esw);
3166 goto create_rx_drop_fg_err;
3168 err = esw_create_vport_rx_drop_rule(esw);
3170 goto create_rx_drop_rule_err;
3174 create_rx_drop_rule_err:
3175 esw_destroy_vport_rx_drop_group(esw);
3176 create_rx_drop_fg_err:
3177 esw_destroy_vport_rx_group(esw);
3179 esw_destroy_offloads_fdb_tables(esw);
3181 esw_destroy_restore_table(esw);
3183 esw_destroy_offloads_table(esw);
3184 create_offloads_err:
3185 esw_destroy_uplink_offloads_acl_tables(esw);
3187 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3189 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3193 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3195 esw_destroy_vport_rx_drop_rule(esw);
3196 esw_destroy_vport_rx_drop_group(esw);
3197 esw_destroy_vport_rx_group(esw);
3198 esw_destroy_offloads_fdb_tables(esw);
3199 esw_destroy_restore_table(esw);
3200 esw_destroy_offloads_table(esw);
3201 esw_destroy_uplink_offloads_acl_tables(esw);
3202 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3203 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
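/* ESW_FUNCTIONS_CHANGED handling: re-query the device and load or
 * unload VF vports when the external host changes the VF count.
 */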
3207 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
3209 struct devlink *devlink;
3210 bool host_pf_disabled;
3213 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3214 host_params_context.host_num_of_vfs);
3215 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3216 host_params_context.host_pf_disabled);
3218 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3221 devlink = priv_to_devlink(esw->dev);
3223 /* Number of VFs can only change from "0 to x" or "x to 0". */
3224 if (esw->esw_funcs.num_vfs > 0) {
3225 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3229 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3230 MLX5_VPORT_UC_ADDR_CHANGE);
3232 devl_unlock(devlink);
3236 esw->esw_funcs.num_vfs = new_num_vfs;
3237 devl_unlock(devlink);
3240 static void esw_functions_changed_event_handler(struct work_struct *work)
3242 struct mlx5_host_work *host_work;
3243 struct mlx5_eswitch *esw;
3246 host_work = container_of(work, struct mlx5_host_work, work);
3247 esw = host_work->esw;
3249 out = mlx5_esw_query_functions(esw->dev);
3253 esw_vfs_changed_event_handler(esw, out);
3259 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3261 struct mlx5_esw_functions *esw_funcs;
3262 struct mlx5_host_work *host_work;
3263 struct mlx5_eswitch *esw;
3265 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3269 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3270 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3272 host_work->esw = esw;
3274 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3275 queue_work(esw->work_queue, &host_work->work);
3280 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3282 const u32 *query_host_out;
3284 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3287 query_host_out = mlx5_esw_query_functions(esw->dev);
3288 if (IS_ERR(query_host_out))
3289 return PTR_ERR(query_host_out);
/* Mark a non-local controller with a non-zero controller number. */
3292 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3293 host_params_context.host_number);
3294 kvfree(query_host_out);
3298 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3300 /* Local controller is always valid */
3301 if (controller == 0)
3304 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
/* External host numbering starts at zero in the device */
3308 return (controller == esw->offloads.host_number + 1);
3311 int esw_offloads_enable(struct mlx5_eswitch *esw)
3313 struct mapping_ctx *reg_c0_obj_pool;
3314 struct mlx5_vport *vport;
3319 mutex_init(&esw->offloads.termtbl_mutex);
3320 mlx5_rdma_enable_roce(esw->dev);
3322 err = mlx5_esw_host_number_init(esw);
3326 err = esw_offloads_metadata_init(esw);
3330 err = esw_set_passing_vport_metadata(esw, true);
3332 goto err_vport_metadata;
3334 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3336 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
3337 sizeof(struct mlx5_mapped_obj),
3338 ESW_REG_C0_USER_DATA_METADATA_MASK,
3341 if (IS_ERR(reg_c0_obj_pool)) {
3342 err = PTR_ERR(reg_c0_obj_pool);
3345 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3347 err = esw_offloads_steering_init(esw);
3349 goto err_steering_init;
3351 /* Representor will control the vport link state */
3352 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3353 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3354 if (mlx5_core_ec_sriov_enabled(esw->dev))
3355 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
3356 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3358 /* Uplink vport rep must load first. */
3359 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
3363 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3370 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3372 esw_offloads_steering_cleanup(esw);
3374 mapping_destroy(reg_c0_obj_pool);
3376 esw_set_passing_vport_metadata(esw, false);
3378 esw_offloads_metadata_uninit(esw);
3380 mlx5_rdma_disable_roce(esw->dev);
3381 mutex_destroy(&esw->offloads.termtbl_mutex);
3385 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3386 struct netlink_ext_ack *extack)
3390 esw->mode = MLX5_ESWITCH_LEGACY;
/* If changing from switchdev to legacy mode without SR-IOV enabled,
 * there is no need to create a legacy FDB.
 */
3395 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
3398 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3400 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3405 void esw_offloads_disable(struct mlx5_eswitch *esw)
3407 mlx5_eswitch_disable_pf_vf_vports(esw);
3408 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3409 esw_set_passing_vport_metadata(esw, false);
3410 esw_offloads_steering_cleanup(esw);
3411 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3412 esw_offloads_metadata_uninit(esw);
3413 mlx5_rdma_disable_roce(esw->dev);
3414 mutex_destroy(&esw->offloads.termtbl_mutex);
3417 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
3420 case DEVLINK_ESWITCH_MODE_LEGACY:
3421 *mlx5_mode = MLX5_ESWITCH_LEGACY;
3423 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3424 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
3433 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
3435 switch (mlx5_mode) {
3436 case MLX5_ESWITCH_LEGACY:
3437 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3439 case MLX5_ESWITCH_OFFLOADS:
3440 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3449 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3452 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3453 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3455 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3456 *mlx5_mode = MLX5_INLINE_MODE_L2;
3458 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3459 *mlx5_mode = MLX5_INLINE_MODE_IP;
3461 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3462 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3471 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3473 switch (mlx5_mode) {
3474 case MLX5_INLINE_MODE_NONE:
3475 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3477 case MLX5_INLINE_MODE_L2:
3478 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3480 case MLX5_INLINE_MODE_IP:
3481 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3483 case MLX5_INLINE_MODE_TCP_UDP:
3484 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3493 static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
3495 struct net *devl_net, *netdev_net;
3496 struct mlx5_eswitch *esw;
3498 esw = mlx5_devlink_eswitch_get(devlink);
3499 netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
3500 devl_net = devlink_net(devlink);
3502 return net_eq(devl_net, netdev_net);
3505 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3506 struct netlink_ext_ack *extack)
3508 u16 cur_mlx5_mode, mlx5_mode = 0;
3509 struct mlx5_eswitch *esw;
3512 esw = mlx5_devlink_eswitch_get(devlink);
3514 return PTR_ERR(esw);
3516 if (esw_mode_from_devlink(mode, &mlx5_mode))
3519 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
3520 !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
3521 NL_SET_ERR_MSG_MOD(extack,
3522 "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
3526 mlx5_lag_disable_change(esw->dev);
3527 err = mlx5_esw_try_lock(esw);
3529 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
3532 cur_mlx5_mode = err;
3535 if (cur_mlx5_mode == mlx5_mode)
3538 mlx5_eswitch_disable_locked(esw);
3539 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
3540 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3541 NL_SET_ERR_MSG_MOD(extack,
3542 "Can't change mode while devlink traps are active");
3546 err = esw_offloads_start(esw, extack);
3547 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
3548 err = esw_offloads_stop(esw, extack);
3549 mlx5_rescan_drivers(esw->dev);
3555 mlx5_esw_unlock(esw);
3557 mlx5_lag_enable_change(esw->dev);
3561 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3563 struct mlx5_eswitch *esw;
3566 esw = mlx5_devlink_eswitch_get(devlink);
3568 return PTR_ERR(esw);
3570 down_read(&esw->mode_lock);
3571 err = esw_mode_to_devlink(esw->mode, mode);
3572 up_read(&esw->mode_lock);
3576 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3577 struct netlink_ext_ack *extack)
3579 struct mlx5_core_dev *dev = esw->dev;
3580 struct mlx5_vport *vport;
3581 u16 err_vport_num = 0;
3585 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3586 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3588 err_vport_num = vport->vport;
3589 NL_SET_ERR_MSG_MOD(extack,
3590 "Failed to set min inline on vport");
3591 goto revert_inline_mode;
3594 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
3595 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3596 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3598 err_vport_num = vport->vport;
3599 NL_SET_ERR_MSG_MOD(extack,
3600 "Failed to set min inline on vport");
3601 goto revert_ec_vf_inline_mode;
3607 revert_ec_vf_inline_mode:
3608 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3609 if (vport->vport == err_vport_num)
3611 mlx5_modify_nic_vport_min_inline(dev,
3613 esw->offloads.inline_mode);
3616 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3617 if (vport->vport == err_vport_num)
3619 mlx5_modify_nic_vport_min_inline(dev,
3621 esw->offloads.inline_mode);
3626 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3627 struct netlink_ext_ack *extack)
3629 struct mlx5_core_dev *dev = devlink_priv(devlink);
3630 struct mlx5_eswitch *esw;
3634 esw = mlx5_devlink_eswitch_get(devlink);
3636 return PTR_ERR(esw);
3638 down_write(&esw->mode_lock);
3640 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
3641 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
3642 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
3648 case MLX5_CAP_INLINE_MODE_L2:
3649 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
3652 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3656 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3657 NL_SET_ERR_MSG_MOD(extack,
3658 "Can't set inline mode when flows are configured");
3663 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
3667 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3671 esw->offloads.inline_mode = mlx5_mode;
3672 up_write(&esw->mode_lock);
3676 up_write(&esw->mode_lock);
3680 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3682 struct mlx5_eswitch *esw;
3685 esw = mlx5_devlink_eswitch_get(devlink);
3687 return PTR_ERR(esw);
3689 down_read(&esw->mode_lock);
3690 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3691 up_read(&esw->mode_lock);
3695 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
3697 struct devlink *devlink = priv_to_devlink(dev);
3698 struct mlx5_eswitch *esw;
3701 esw = mlx5_devlink_eswitch_get(devlink);
3703 devl_unlock(devlink);
3704 /* Failure means no eswitch => not possible to change encap */
3708 down_write(&esw->mode_lock);
3709 if (esw->mode != MLX5_ESWITCH_LEGACY &&
3710 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
3711 up_write(&esw->mode_lock);
3712 devl_unlock(devlink);
3716 esw->offloads.num_block_encap++;
3717 up_write(&esw->mode_lock);
3718 devl_unlock(devlink);
3722 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
3724 struct devlink *devlink = priv_to_devlink(dev);
3725 struct mlx5_eswitch *esw;
3727 esw = mlx5_devlink_eswitch_get(devlink);
3731 down_write(&esw->mode_lock);
3732 esw->offloads.num_block_encap--;
3733 up_write(&esw->mode_lock);
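/* Changing the encap mode rebuilds the FDB tables, so it is refused
 * while offloaded flows exist or while encap changes are blocked.
 */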
3736 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3737 enum devlink_eswitch_encap_mode encap,
3738 struct netlink_ext_ack *extack)
3740 struct mlx5_core_dev *dev = devlink_priv(devlink);
3741 struct mlx5_eswitch *esw;
3744 esw = mlx5_devlink_eswitch_get(devlink);
3746 return PTR_ERR(esw);
3748 down_write(&esw->mode_lock);
3750 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
3751 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
3752 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
3757 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
3762 if (esw->mode == MLX5_ESWITCH_LEGACY) {
3763 esw->offloads.encap = encap;
3767 if (esw->offloads.encap == encap)
3770 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3771 NL_SET_ERR_MSG_MOD(extack,
3772 "Can't set encapsulation when flows are configured");
3777 if (esw->offloads.num_block_encap) {
3778 NL_SET_ERR_MSG_MOD(extack,
3779 "Can't set encapsulation when IPsec SA and/or policies are configured");
3784 esw_destroy_offloads_fdb_tables(esw);
3786 esw->offloads.encap = encap;
3788 err = esw_create_offloads_fdb_tables(esw);
3791 NL_SET_ERR_MSG_MOD(extack,
3792 "Failed re-creating fast FDB table");
3793 esw->offloads.encap = !encap;
3794 (void)esw_create_offloads_fdb_tables(esw);
3798 up_write(&esw->mode_lock);
3802 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
3803 enum devlink_eswitch_encap_mode *encap)
3805 struct mlx5_eswitch *esw;
3807 esw = mlx5_devlink_eswitch_get(devlink);
3809 return PTR_ERR(esw);
3811 down_read(&esw->mode_lock);
3812 *encap = esw->offloads.encap;
3813 up_read(&esw->mode_lock);
3818 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
/* Currently, only an ECPF-based device has a representor for the host PF. */
3821 if (vport_num == MLX5_VPORT_PF &&
3822 !mlx5_core_is_ecpf_esw_manager(esw->dev))
3825 if (vport_num == MLX5_VPORT_ECPF &&
3826 !mlx5_ecpf_vport_exists(esw->dev))
3832 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
3833 const struct mlx5_eswitch_rep_ops *ops,
3836 struct mlx5_eswitch_rep_data *rep_data;
3837 struct mlx5_eswitch_rep *rep;
3840 esw->offloads.rep_ops[rep_type] = ops;
3841 mlx5_esw_for_each_rep(esw, i, rep) {
3842 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
3844 rep_data = &rep->rep_data[rep_type];
3845 atomic_set(&rep_data->state, REP_REGISTERED);
3849 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
3851 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
3853 struct mlx5_eswitch_rep *rep;
3856 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
3857 __unload_reps_all_vport(esw, rep_type);
3859 mlx5_esw_for_each_rep(esw, i, rep)
3860 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
3862 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
3864 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
3866 struct mlx5_eswitch_rep *rep;
3868 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3869 return rep->rep_data[rep_type].priv;
3872 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
3876 struct mlx5_eswitch_rep *rep;
3878 rep = mlx5_eswitch_get_rep(esw, vport);
3880 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
3881 esw->offloads.rep_ops[rep_type]->get_proto_dev)
3882 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
3885 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
3887 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
3889 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
3891 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
3893 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
3896 return mlx5_eswitch_get_rep(esw, vport);
3898 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
3900 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
3902 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
3904 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
3906 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
3908 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
3910 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
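/* The vport metadata occupies the upper ESW_SOURCE_PORT_METADATA_BITS
 * of reg_c_0 for matching, hence the shift below; it pairs with
 * mlx5_esw_set_spec_source_port() above.
 */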
3912 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
3915 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3917 if (WARN_ON_ONCE(IS_ERR(vport)))
3920 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
3922 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
3924 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
3925 u16 vport_num, u32 controller, u32 sfnum)
3929 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
3933 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
3937 err = mlx5_esw_offloads_rep_load(esw, vport_num);
3943 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3945 mlx5_esw_vport_disable(esw, vport_num);
3949 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
3951 mlx5_esw_offloads_rep_unload(esw, vport_num);
3952 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3953 mlx5_esw_vport_disable(esw, vport_num);
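/* The functions below maintain vhca_map, an xarray keyed by vhca_id
 * that stores the owning vport number, letting
 * mlx5_eswitch_vhca_id_to_vport() resolve a vhca_id back to a vport.
 */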
3956 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
3958 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3965 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
3969 err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
3973 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
3974 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
3981 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
3983 u16 *old_entry, *vhca_map_entry, vhca_id;
3986 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3988 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3993 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
3994 if (!vhca_map_entry)
3997 *vhca_map_entry = vport_num;
3998 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
3999 if (xa_is_err(old_entry)) {
4000 kfree(vhca_map_entry);
4001 return xa_err(old_entry);
4007 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
4009 u16 *vhca_map_entry, vhca_id;
4012 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
4017 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
4018 kfree(vhca_map_entry);
4021 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
4023 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
4032 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
4035 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4037 if (WARN_ON_ONCE(IS_ERR(vport)))
4040 return vport->metadata;
4042 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
4045 is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
4047 return vport_num == MLX5_VPORT_PF ||
4048 mlx5_eswitch_is_vf_vport(esw, vport_num) ||
4049 mlx5_esw_is_sf_vport(esw, vport_num);
4052 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
4053 u8 *hw_addr, int *hw_addr_len,
4054 struct netlink_ext_ack *extack)
4056 struct mlx5_eswitch *esw;
4057 struct mlx5_vport *vport;
4060 esw = mlx5_devlink_eswitch_get(port->devlink);
4062 return PTR_ERR(esw);
4064 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4065 if (!is_port_function_supported(esw, vport_num))
4068 vport = mlx5_eswitch_get_vport(esw, vport_num);
4069 if (IS_ERR(vport)) {
4070 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4071 return PTR_ERR(vport);
4074 mutex_lock(&esw->state_lock);
4075 ether_addr_copy(hw_addr, vport->info.mac);
4076 *hw_addr_len = ETH_ALEN;
4077 mutex_unlock(&esw->state_lock);
4081 int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
4082 const u8 *hw_addr, int hw_addr_len,
4083 struct netlink_ext_ack *extack)
4085 struct mlx5_eswitch *esw;
4088 esw = mlx5_devlink_eswitch_get(port->devlink);
4090 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
4091 return PTR_ERR(esw);
4094 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4095 if (!is_port_function_supported(esw, vport_num)) {
4096 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
4100 return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
4103 static struct mlx5_vport *
4104 mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
4108 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
4109 return ERR_PTR(-EOPNOTSUPP);
4111 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4112 if (!is_port_function_supported(esw, vport_num))
4113 return ERR_PTR(-EOPNOTSUPP);
4115 return mlx5_eswitch_get_vport(esw, vport_num);
4118 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
4119 struct netlink_ext_ack *extack)
4121 struct mlx5_eswitch *esw;
4122 struct mlx5_vport *vport;
4123 int err = -EOPNOTSUPP;
4125 esw = mlx5_devlink_eswitch_get(port->devlink);
4127 return PTR_ERR(esw);
4129 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4130 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4134 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4135 if (IS_ERR(vport)) {
4136 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4137 return PTR_ERR(vport);
4140 mutex_lock(&esw->state_lock);
4141 if (vport->enabled) {
4142 *is_enabled = vport->info.mig_enabled;
4145 mutex_unlock(&esw->state_lock);
4149 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
4150 struct netlink_ext_ack *extack)
4152 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4153 struct mlx5_eswitch *esw;
4154 struct mlx5_vport *vport;
4157 int err = -EOPNOTSUPP;
4159 esw = mlx5_devlink_eswitch_get(port->devlink);
4161 return PTR_ERR(esw);
4163 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4164 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4168 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4169 if (IS_ERR(vport)) {
4170 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4171 return PTR_ERR(vport);
4174 mutex_lock(&esw->state_lock);
4175 if (!vport->enabled) {
4176 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4180 if (vport->info.mig_enabled == enable) {
4185 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4191 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
4192 MLX5_CAP_GENERAL_2);
4194 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4198 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4199 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
4201 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
4202 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
4204 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
4208 vport->info.mig_enabled = enable;
4213 mutex_unlock(&esw->state_lock);
4217 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
4218 struct netlink_ext_ack *extack)
4220 struct mlx5_eswitch *esw;
4221 struct mlx5_vport *vport;
4222 int err = -EOPNOTSUPP;
4224 esw = mlx5_devlink_eswitch_get(port->devlink);
4226 return PTR_ERR(esw);
4228 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4229 if (IS_ERR(vport)) {
4230 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4231 return PTR_ERR(vport);
4234 mutex_lock(&esw->state_lock);
4235 if (vport->enabled) {
4236 *is_enabled = vport->info.roce_enabled;
4239 mutex_unlock(&esw->state_lock);
4243 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
4244 struct netlink_ext_ack *extack)
4246 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4247 struct mlx5_eswitch *esw;
4248 struct mlx5_vport *vport;
4249 int err = -EOPNOTSUPP;
4254 esw = mlx5_devlink_eswitch_get(port->devlink);
4256 return PTR_ERR(esw);
4258 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4259 if (IS_ERR(vport)) {
4260 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4261 return PTR_ERR(vport);
4263 vport_num = vport->vport;
4265 mutex_lock(&esw->state_lock);
4266 if (!vport->enabled) {
4267 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4271 if (vport->info.roce_enabled == enable) {
4276 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4282 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
4285 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4289 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4290 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
4292 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
4293 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
4295 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
4299 vport->info.roce_enabled = enable;
4304 mutex_unlock(&esw->state_lock);