 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "lib/devcom.h"
#include "lib/fs_chains.h"
#include "en/mapping.h"
#include "en/tc/post_meter.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
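
/* Usage sketch for the iterator above (hypothetical caller, not part of the
 * original file): it walks every representor stored in the vport_reps
 * xarray, with @i as the xarray index.
 *
 *	struct mlx5_eswitch_rep *rep;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_rep(esw, i, rep)
 *		pr_debug("rep for vport 0x%x\n", rep->vport);
 */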
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
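
/* mlx5_eswitch_get_rep() below returns the representor registered for a
 * vport number, or NULL if none is registered; it is a plain xa_load() on
 * the offloads.vport_reps xarray.
 */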
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
	return xa_load(&esw->offloads.vport_reps, vport_num);

mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)

		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

/* Strictly, only the upper 16 bits of reg c0 need to be cleared, but the
 * lower 16 bits are not used by the subsequent processing either, so clear
 * the whole register for simplicity.
 */
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
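
/* Layout sketch for reg c0, an assumption inferred from how this file uses
 * the register (not an authoritative hardware definition):
 *
 *	bits 31..16	source port / vport match metadata
 *	bits 15..0	chain restore / user data mapping
 *
 * which is why only the upper half strictly needs clearing above.
 */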
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	/* Use metadata matching because a vport is not represented by a
	 * single VHCA in dual-port RoCE mode, and matching on the source
	 * vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (!attr->chain && esw_attr && esw_attr->int_port)
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr)
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))

	ft = mlx5_esw_indir_table_get(esw, attr,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);

esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw,
					 mlx5_esw_indir_table_decap_vport(attr),

esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
		   struct mlx5e_meter_attr *meter,
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
	dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
	dest[i].range.min = 0;
	dest[i].range.max = meter->params.mtu;
	dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
	dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
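
/* The range destination set up above splits traffic by packet length: a
 * packet whose length falls in [range.min, range.max] (i.e. up to the
 * meter's MTU) is steered to hit_ft, a longer packet to miss_ft. This
 * reading follows from the mtu_true/mtu_false post-meter table names.
 */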
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;

esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr);

esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);

esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);

esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,

esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)

esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);

esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	/* The indirect table is supported only for flows whose in_port is the
	 * uplink and whose destinations are vports on the same eswitch as the
	 * uplink; return false if at least one destination doesn't meet these
	 * criteria.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {

esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      bool ignore_flow_lvl,
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr);
			goto err_indir_tbl_get;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);

esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
	mlx5_chains_put_table(chains, chain, prio, level);

esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_is_mpesw(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;

	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;

esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);

esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
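
/* Dispatch helper: esw_setup_dests() below picks exactly one destination
 * setup path (slow path, sampler, accept, MTU range, indirect table, chain
 * source-port rewrite, vport list, flow table or chain) based on
 * attr->flags and the eswitch capabilities, filling dest[] and advancing
 * *i by the number of entries consumed.
 */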
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
	} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,

esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
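
/* esw_setup_meter() below programs the flow's execute-ASO action from the
 * attached flow meter: the ASO object id and meter index select the meter,
 * the initial color is green, and the resulting packet color is returned
 * in metadata register 5 (see the comment in the function body).
 */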
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
			goto err_create_goto_table;

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);

		rule = ERR_CAST(fdb);

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);

	atomic64_inc(&esw->offloads.num_flows);

		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
	esw_cleanup_dests(esw, attr);
err_create_goto_table:

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			/* Source port rewrite (forward to ovs internal port or
			 * stack device) isn't supported in rules with a split
			 * action.
			 */
		esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
			goto err_chain_src_rewrite;

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

		i = esw_attr->split_count;
		goto err_chain_src_rewrite;

	atomic64_inc(&esw->offloads.num_flows);

err_chain_src_rewrite:
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);

__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);

mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
	__mlx5_eswitch_del_rule(esw, rule, attr, false);

mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
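
/* Pairing sketch (hypothetical caller, error handling elided): a rule
 * created by mlx5_eswitch_add_offloaded_rule() is released with
 * mlx5_eswitch_del_offloaded_rule() using the same attr, e.g.:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */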
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
		flow_rule = ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	/* source vport is the esw manager */
	vport = from_esw->manager_vport;

	if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(from_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
	    rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",

EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
	mlx5_del_flow_rules(rule);

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
	mlx5_del_flow_rules(rule);
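
/* The meta rule below matches two metadata registers at once: reg c0 must
 * carry the vport's match metadata, and reg c1 (under ESW_TUN_MASK) must
 * carry ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK, i.e. a packet coming back from
 * the slow table that should be delivered to @vport_num.
 */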
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
			 vport_num, PTR_ERR(flow_rule));

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);

	if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
		esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;

	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
		goto alloc_flows_err;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		flows[vport->index] = flow;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		flows[vport->index] = flow;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		flows[vport->index] = flow;

	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
			if (i >= mlx5_core_max_ec_vfs(peer_dev))
			esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
							   spec, vport->vport);
			flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						   spec, &flow_act, &dest, 1);
				err = PTR_ERR(flow);
				goto add_ec_vf_flow_err;
			flows[vport->index] = flow;

	esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;

	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
		if (!flows[vport->index])
		mlx5_del_flow_rules(flows[vport->index]);

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);

	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
					struct mlx5_core_dev *peer_dev)
	u16 peer_index = mlx5_get_dev_index(peer_dev);
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;

	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];

	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
			/* The flow for a particular vport could be NULL if the other ECPF
			 * has fewer or no VFs enabled
			 */
			if (!flows[vport->index])
			mlx5_del_flow_rules(flows[vport->index]);

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);

	esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(flow_rule))
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2 | match_params);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS | match_params);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;

	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;

	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);

	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when the TTL workaround is needed, e.g. when
		 * ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig.
		 */
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);

	mlx5_chains_print_info(chains);

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft, the ft chain that is always created */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);

	/* Open level 1 for split fdb rules now if prios aren't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);

	mlx5_chains_set_end_ft(chains, nf_ft);

	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);

#else /* CONFIG_MLX5_CLS_ACT */

esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)

esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;

	memset(flow_group_in, 0, inlen);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
	esw->fdb_table.offloads.send_to_vport_grp = g;

esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;

	if (!esw_src_port_rewrite_supported(esw))

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

send_vport_meta_err:

esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
	int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))

	memset(flow_group_in, 0, inlen);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + max_peer_ports);
	*ix += max_peer_ports + 1;

	g = mlx5_create_flow_group(fdb, flow_group_in);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
	esw->fdb_table.offloads.peer_miss_grp = g;
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);

	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
		esw_warn(dev, "Failed to get FDB flow namespace\n");

	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max sfs
	 * to the same value on both devices. If this needs to change in the
	 * future, note that the peer miss group should also be created based
	 * on the number of total vports of the peer (currently it also uses
	 * esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;
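	/* Worked example with hypothetical numbers (not from any real
	 * device): MLX5_MAX_PORTS = 2, total_vports = 10, MAX_SQ_NVPORTS = 32
	 * and MAX_PF_SQ = 256 give
	 *	send-to-vport space: 2 * (10 * 32 + 256) = 1152
	 *	peer miss + meta:    10 * 2              =   20
	 *	miss flows:                                   2
	 * so table_size = 1174 entries.
	 */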
	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create an empty TC-miss managed table. This allows plugging in the
	 * following priorities without directly exposing their level 0 table
	 * to eswitch_offloads, and lets it be passed as miss_fdb to the
	 * following call to esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;

	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);

	kvfree(flow_group_in);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	esw_chains_destroy(esw, esw_chains(esw));
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
	kvfree(flow_group_in);

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
	if (!mlx5_eswitch_get_slow_fdb(esw))

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");

	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);

	esw->offloads.ft_offloads = ft_offloads;

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;

	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);

	esw->offloads.vport_rx_group = g;

	kvfree(flow_group_in);

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);

static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
	/* The ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
	 * for the drop rule, which is placed at the end of the table.
	 * So return the total number of vport and int_port entries as the
	 * rule index.
	 */
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
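
/* For example (hypothetical numbers): if
 * esw_get_nr_ft_offloads_steering_src_ports() returns 44, the rx rules
 * occupy indices 0..43 and the drop rule sits alone at index 44, the last
 * entry of ft_offloads.
 */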
1939 static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
1941 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1942 struct mlx5_flow_group *g;
1947 flow_index = esw_create_vport_rx_drop_rule_index(esw);
1949 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1953 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1954 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1956 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1960 mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
1964 esw->offloads.vport_rx_drop_group = g;
1966 kvfree(flow_group_in);
1970 static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
1972 if (esw->offloads.vport_rx_drop_group)
1973 mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
1977 mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
1979 struct mlx5_flow_spec *spec)
1983 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1984 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
1985 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1986 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
1988 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
1989 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1990 mlx5_eswitch_get_vport_metadata_mask());
1992 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1994 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1995 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1997 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1998 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2000 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2004 struct mlx5_flow_handle *
2005 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
2006 struct mlx5_flow_destination *dest)
2008 struct mlx5_flow_act flow_act = {0};
2009 struct mlx5_flow_handle *flow_rule;
2010 struct mlx5_flow_spec *spec;
2012 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2014 flow_rule = ERR_PTR(-ENOMEM);
2018 mlx5_esw_set_spec_source_port(esw, vport, spec);
2020 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2021 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
2022 &flow_act, dest, 1);
2023 if (IS_ERR(flow_rule)) {
2024 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
2033 static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2035 struct mlx5_flow_act flow_act = {};
2036 struct mlx5_flow_handle *flow_rule;
2038 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
2039 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
2040 &flow_act, NULL, 0);
2041 if (IS_ERR(flow_rule)) {
2043 "fs offloads: Failed to add vport rx drop rule err %ld\n",
2044 PTR_ERR(flow_rule));
2045 return PTR_ERR(flow_rule);
2048 esw->offloads.vport_rx_drop_rule = flow_rule;
2053 static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2055 if (esw->offloads.vport_rx_drop_rule)
2056 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
2059 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
2061 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
2062 struct mlx5_core_dev *dev = esw->dev;
2063 struct mlx5_vport *vport;
2066 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2069 if (!mlx5_esw_is_fdb_created(esw))
2072 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2073 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2074 mlx5_mode = MLX5_INLINE_MODE_NONE;
2076 case MLX5_CAP_INLINE_MODE_L2:
2077 mlx5_mode = MLX5_INLINE_MODE_L2;
2079 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2084 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2085 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
2086 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
2087 if (prev_mlx5_mode != mlx5_mode)
2089 prev_mlx5_mode = mlx5_mode;
2097 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
2099 struct mlx5_esw_offload *offloads = &esw->offloads;
2101 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2104 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
2105 mlx5_destroy_flow_group(offloads->restore_group);
2106 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
2109 static int esw_create_restore_table(struct mlx5_eswitch *esw)
2111 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2112 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2113 struct mlx5_flow_table_attr ft_attr = {};
2114 struct mlx5_core_dev *dev = esw->dev;
2115 struct mlx5_flow_namespace *ns;
2116 struct mlx5_modify_hdr *mod_hdr;
2117 void *match_criteria, *misc;
2118 struct mlx5_flow_table *ft;
2119 struct mlx5_flow_group *g;
2123 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2126 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
2128 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2132 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2133 if (!flow_group_in) {
2138 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
2139 ft = mlx5_create_flow_table(ns, &ft_attr);
2142 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2147 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2149 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
2152 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2153 ESW_REG_C0_USER_DATA_METADATA_MASK);
2154 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2155 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2156 ft_attr.max_fte - 1);
2157 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2158 MLX5_MATCH_MISC_PARAMETERS_2);
2159 g = mlx5_create_flow_group(ft, flow_group_in);
2162 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
2167 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
2168 MLX5_SET(copy_action_in, modact, src_field,
2169 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
2170 MLX5_SET(copy_action_in, modact, dst_field,
2171 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
2172 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2173 MLX5_FLOW_NAMESPACE_KERNEL, 1, modact);
2175 if (IS_ERR(mod_hdr)) {
2176 err = PTR_ERR(mod_hdr);
2177 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
2182 esw->offloads.ft_offloads_restore = ft;
2183 esw->offloads.restore_group = g;
2184 esw->offloads.restore_copy_hdr_id = mod_hdr;
2186 kvfree(flow_group_in);
2191 mlx5_destroy_flow_group(g);
2193 mlx5_destroy_flow_table(ft);
2195 kvfree(flow_group_in);
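/*
 * Annotation (hedged summary, not in the original source): the restore table
 * matches the user-data bits of reg_c_0 (the mapped object id written in the
 * FDB) and, via the modify header allocated above, copies reg_c_1 into reg_b
 * so the value survives into the NIC rx domain, where software can read it
 * back and restore the packet's chain/tunnel context. The flow group spans
 * the whole table: one FTE per possible mapped id,
 * 1 << ESW_REG_C0_USER_DATA_METADATA_BITS entries in total.
 */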
2200 static int esw_offloads_start(struct mlx5_eswitch *esw,
2201 struct netlink_ext_ack *extack)
2205 esw->mode = MLX5_ESWITCH_OFFLOADS;
2206 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
2208 NL_SET_ERR_MSG_MOD(extack,
2209 "Failed setting eswitch to offloads");
2210 esw->mode = MLX5_ESWITCH_LEGACY;
2211 mlx5_rescan_drivers(esw->dev);
2214 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2215 if (mlx5_eswitch_inline_mode_get(esw,
2216 &esw->offloads.inline_mode)) {
2217 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2218 NL_SET_ERR_MSG_MOD(extack,
2219 "Inline mode is different between vports");
2225 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2227 struct mlx5_eswitch_rep *rep;
2231 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
2235 rep->vport = vport->vport;
2236 rep->vport_index = vport->index;
2237 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2238 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2240 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2251 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2252 struct mlx5_eswitch_rep *rep)
2254 xa_erase(&esw->offloads.vport_reps, rep->vport);
2258 static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2260 struct mlx5_eswitch_rep *rep;
2263 mlx5_esw_for_each_rep(esw, i, rep)
2264 mlx5_esw_offloads_rep_cleanup(esw, rep);
2265 xa_destroy(&esw->offloads.vport_reps);
2268 static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
2270 struct mlx5_vport *vport;
2274 xa_init(&esw->offloads.vport_reps);
2276 mlx5_esw_for_each_vport(esw, i, vport) {
2277 err = mlx5_esw_offloads_rep_init(esw, vport);
2284 esw_offloads_cleanup_reps(esw);
2288 static int esw_port_metadata_set(struct devlink *devlink, u32 id,
2289 struct devlink_param_gset_ctx *ctx)
2291 struct mlx5_core_dev *dev = devlink_priv(devlink);
2292 struct mlx5_eswitch *esw = dev->priv.eswitch;
2295 down_write(&esw->mode_lock);
2296 if (mlx5_esw_is_fdb_created(esw)) {
2300 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2305 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2307 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2309 up_write(&esw->mode_lock);
2313 static int esw_port_metadata_get(struct devlink *devlink, u32 id,
2314 struct devlink_param_gset_ctx *ctx)
2316 struct mlx5_core_dev *dev = devlink_priv(devlink);
2318 ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
2322 static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
2323 union devlink_param_value val,
2324 struct netlink_ext_ack *extack)
2326 struct mlx5_core_dev *dev = devlink_priv(devlink);
2329 esw_mode = mlx5_eswitch_mode(dev);
2330 if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
2331 NL_SET_ERR_MSG_MOD(extack,
2332 "E-Switch must either disabled or non switchdev mode");
2338 static const struct devlink_param esw_devlink_params[] = {
2339 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
2340 "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
2341 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
2342 esw_port_metadata_get,
2343 esw_port_metadata_set,
2344 esw_port_metadata_validate),
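/*
 * Usage sketch (assumed iproute2 invocation, not from the original source):
 * the runtime parameter registered above can be driven from userspace along
 * the lines of
 *
 *	devlink dev param set pci/0000:03:00.0 \
 *		name esw_port_metadata value false cmode runtime
 *
 * subject to esw_port_metadata_validate(), i.e. only while the E-Switch is
 * not in switchdev mode and before the FDB has been created.
 */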
2347 int esw_offloads_init(struct mlx5_eswitch *esw)
2351 err = esw_offloads_init_reps(esw);
2355 err = devl_params_register(priv_to_devlink(esw->dev),
2357 ARRAY_SIZE(esw_devlink_params));
2364 esw_offloads_cleanup_reps(esw);
2368 void esw_offloads_cleanup(struct mlx5_eswitch *esw)
2370 devl_params_unregister(priv_to_devlink(esw->dev),
2372 ARRAY_SIZE(esw_devlink_params));
2373 esw_offloads_cleanup_reps(esw);
2376 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2377 struct mlx5_eswitch_rep *rep, u8 rep_type)
2379 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2380 REP_LOADED, REP_REGISTERED) == REP_LOADED)
2381 esw->offloads.rep_ops[rep_type]->unload(rep);
2384 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2386 struct mlx5_eswitch_rep *rep;
2389 mlx5_esw_for_each_rep(esw, i, rep)
2390 __esw_offloads_unload_rep(esw, rep, rep_type);
2393 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2395 struct mlx5_eswitch_rep *rep;
2399 rep = mlx5_eswitch_get_rep(esw, vport_num);
2400 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2401 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2402 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
2403 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2411 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2412 for (--rep_type; rep_type >= 0; rep_type--)
2413 __esw_offloads_unload_rep(esw, rep, rep_type);
2417 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2419 struct mlx5_eswitch_rep *rep;
2422 rep = mlx5_eswitch_get_rep(esw, vport_num);
2423 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2424 __esw_offloads_unload_rep(esw, rep, rep_type);
2427 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
2431 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2434 if (vport_num != MLX5_VPORT_UPLINK) {
2435 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
2440 err = mlx5_esw_offloads_rep_load(esw, vport_num);
2446 if (vport_num != MLX5_VPORT_UPLINK)
2447 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2451 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
2453 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2456 mlx5_esw_offloads_rep_unload(esw, vport_num);
2458 if (vport_num != MLX5_VPORT_UPLINK)
2459 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2462 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
2463 struct mlx5_core_dev *slave)
2465 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2466 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2467 struct mlx5_flow_root_namespace *root;
2468 struct mlx5_flow_namespace *ns;
2471 MLX5_SET(set_flow_table_root_in, in, opcode,
2472 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2473 MLX5_SET(set_flow_table_root_in, in, table_type,
2477 ns = mlx5_get_flow_namespace(master,
2478 MLX5_FLOW_NAMESPACE_FDB);
2479 root = find_root(&ns->node);
2480 mutex_lock(&root->chain_lock);
2481 MLX5_SET(set_flow_table_root_in, in,
2482 table_eswitch_owner_vhca_id_valid, 1);
2483 MLX5_SET(set_flow_table_root_in, in,
2484 table_eswitch_owner_vhca_id,
2485 MLX5_CAP_GEN(master, vhca_id));
2486 MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
2489 ns = mlx5_get_flow_namespace(slave,
2490 MLX5_FLOW_NAMESPACE_FDB);
2491 root = find_root(&ns->node);
2492 mutex_lock(&root->chain_lock);
2493 MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
2497 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2498 mutex_unlock(&root->chain_lock);
2503 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
2504 struct mlx5_core_dev *slave,
2505 struct mlx5_vport *vport,
2506 struct mlx5_flow_table *acl)
2508 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
2509 struct mlx5_flow_handle *flow_rule = NULL;
2510 struct mlx5_flow_destination dest = {};
2511 struct mlx5_flow_act flow_act = {};
2512 struct mlx5_flow_spec *spec;
2516 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2520 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2521 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2523 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2524 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);
2526 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2527 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2528 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
2529 source_eswitch_owner_vhca_id);
2531 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2532 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2533 dest.vport.num = slave->priv.eswitch->manager_vport;
2534 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
2535 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
2537 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act, &dest, 1);
2539 if (IS_ERR(flow_rule)) {
2540 err = PTR_ERR(flow_rule);
2542 err = xa_insert(&vport->egress.offloads.bounce_rules,
2543 slave_index, flow_rule, GFP_KERNEL);
2545 mlx5_del_flow_rules(flow_rule);
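/*
 * Annotation (not in the original source): each "bounce rule" added above
 * steers uplink traffic that originated from the slave's eswitch (matched by
 * source_port == MLX5_VPORT_UPLINK plus the slave's vhca_id) back out to the
 * slave's manager vport. The rule is tracked in the bounce_rules xarray
 * keyed by that vhca_id so it can be torn down per slave later.
 */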
2552 static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
2553 struct mlx5_flow_namespace *egress_ns,
2554 struct mlx5_vport *vport, size_t count)
2556 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2557 struct mlx5_flow_table_attr ft_attr = {
2558 .max_fte = count, .prio = 0, .level = 0,
2560 struct mlx5_flow_table *acl;
2561 struct mlx5_flow_group *g;
2562 void *match_criteria;
2566 if (vport->egress.acl)
2569 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2573 if (vport->vport || mlx5_core_is_ecpf(esw->dev))
2574 ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
2576 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
2582 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2584 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2585 misc_parameters.source_port);
2586 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2587 misc_parameters.source_eswitch_owner_vhca_id);
2588 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2589 MLX5_MATCH_MISC_PARAMETERS);
2591 MLX5_SET(create_flow_group_in, flow_group_in,
2592 source_eswitch_owner_vhca_id_valid, 1);
2593 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2594 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);
2596 g = mlx5_create_flow_group(acl, flow_group_in);
2602 vport->egress.acl = acl;
2603 vport->egress.offloads.bounce_grp = g;
2604 vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
2605 xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);
2607 kvfree(flow_group_in);
2612 mlx5_destroy_flow_table(acl);
2614 kvfree(flow_group_in);
2618 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
2620 if (!xa_empty(&vport->egress.offloads.bounce_rules))
2622 mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
2623 vport->egress.offloads.bounce_grp = NULL;
2624 mlx5_destroy_flow_table(vport->egress.acl);
2625 vport->egress.acl = NULL;
2628 static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
2629 struct mlx5_core_dev *slave, size_t count)
2631 struct mlx5_eswitch *esw = master->priv.eswitch;
2632 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
2633 struct mlx5_flow_namespace *egress_ns;
2634 struct mlx5_vport *vport;
2637 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2639 return PTR_ERR(vport);
2641 egress_ns = mlx5_get_flow_vport_acl_namespace(master,
2642 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
2647 if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
2650 err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
2654 if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
2657 err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
2664 esw_master_egress_destroy_resources(vport);
2668 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
2669 struct mlx5_core_dev *slave_dev)
2671 struct mlx5_vport *vport;
2673 vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
2674 dev->priv.eswitch->manager_vport);
2676 esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));
2678 if (xa_empty(&vport->egress.offloads.bounce_rules)) {
2679 esw_acl_egress_ofld_cleanup(vport);
2680 xa_destroy(&vport->egress.offloads.bounce_rules);
2684 int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
2685 struct mlx5_eswitch *slave_esw, int max_slaves)
2689 err = esw_set_slave_root_fdb(master_esw->dev,
2694 err = esw_set_master_egress_rule(master_esw->dev,
2695 slave_esw->dev, max_slaves);
2702 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2706 void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
2707 struct mlx5_eswitch *slave_esw)
2709 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2710 esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
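/*
 * Annotation (hedged, not in the original source): in shared-FDB mode the
 * add/del pair above is symmetric. Add points the slave's FDB root at the
 * master's root table and installs the master's egress bounce resources for
 * that slave; del resets the slave's root (master == NULL) and removes the
 * per-slave egress rule, with the egress ACL cleaned up once the last bounce
 * rule is gone.
 */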
2713 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
2714 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2716 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
2717 struct mlx5_eswitch *peer_esw)
2719 const struct mlx5_eswitch_rep_ops *ops;
2720 struct mlx5_eswitch_rep *rep;
2724 mlx5_esw_for_each_rep(esw, i, rep) {
2725 rep_type = NUM_REP_TYPES;
2726 while (rep_type--) {
2727 ops = esw->offloads.rep_ops[rep_type];
2728 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2730 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
2735 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
2736 struct mlx5_eswitch *peer_esw)
2738 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
2739 mlx5e_tc_clean_fdb_peer_flows(esw);
2741 mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
2742 esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
2745 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2746 struct mlx5_eswitch *peer_esw)
2748 const struct mlx5_eswitch_rep_ops *ops;
2749 struct mlx5_eswitch_rep *rep;
2754 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2758 mlx5_esw_for_each_rep(esw, i, rep) {
2759 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2760 ops = esw->offloads.rep_ops[rep_type];
2761 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2763 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2773 mlx5_esw_offloads_unpair(esw, peer_esw);
2777 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2778 struct mlx5_eswitch *peer_esw, bool pair)
2781 u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
2782 struct mlx5_flow_root_namespace *peer_ns;
2783 u8 idx = mlx5_get_dev_index(esw->dev);
2784 struct mlx5_flow_root_namespace *ns;
2787 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2788 ns = esw->dev->priv.steering->fdb_root_ns;
2791 err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
2795 err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
2797 mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
2801 mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
2802 mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
2808 static int mlx5_esw_offloads_devcom_event(int event, void *my_data, void *event_data)
2812 struct mlx5_eswitch *esw = my_data;
2813 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2814 struct mlx5_eswitch *peer_esw = event_data;
2815 u16 esw_i, peer_esw_i;
2819 peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
2820 esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
2821 esw_paired = !!xa_load(&esw->paired, peer_esw_i);
2824 case ESW_OFFLOADS_DEVCOM_PAIR:
2825 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2826 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2832 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2835 err = mlx5_esw_offloads_pair(esw, peer_esw);
2839 err = mlx5_esw_offloads_pair(peer_esw, esw);
2843 err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
2847 err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
2852 peer_esw->num_peers++;
2853 mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
2856 case ESW_OFFLOADS_DEVCOM_UNPAIR:
2860 peer_esw->num_peers--;
2862 if (!esw->num_peers && !peer_esw->num_peers)
2863 mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
2864 xa_erase(&peer_esw->paired, esw_i);
2865 xa_erase(&esw->paired, peer_esw_i);
2866 mlx5_esw_offloads_unpair(peer_esw, esw);
2867 mlx5_esw_offloads_unpair(esw, peer_esw);
2868 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2875 xa_erase(&esw->paired, peer_esw_i);
2877 mlx5_esw_offloads_unpair(peer_esw, esw);
2879 mlx5_esw_offloads_unpair(esw, peer_esw);
2881 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2883 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
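/*
 * Annotation (sketch of the pairing handshake, inferred from the handler
 * above): on ESW_OFFLOADS_DEVCOM_PAIR both directions are wired up (ns
 * peering, peer-miss rules and rep PAIR events for esw->peer and peer->esw),
 * the pair is recorded in both 'paired' xarrays keyed by the peer's vhca_id,
 * and the devcom component is marked ready. UNPAIR undoes the same steps,
 * and the error unwind above mirrors that ordering.
 */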
2888 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
2890 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2893 for (i = 0; i < MLX5_MAX_PORTS; i++)
2894 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
2895 mutex_init(&esw->offloads.peer_mutex);
2897 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2900 if (!mlx5_lag_is_supported(esw->dev))
2903 xa_init(&esw->paired);
2904 mlx5_devcom_register_component(devcom,
2905 MLX5_DEVCOM_ESW_OFFLOADS,
2906 mlx5_esw_offloads_devcom_event, esw);
2910 mlx5_devcom_send_event(devcom,
2911 MLX5_DEVCOM_ESW_OFFLOADS,
2912 ESW_OFFLOADS_DEVCOM_PAIR,
2913 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2916 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
2918 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2920 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2923 if (!mlx5_lag_is_supported(esw->dev))
2926 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
2927 ESW_OFFLOADS_DEVCOM_UNPAIR,
2928 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2930 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2931 xa_destroy(&esw->paired);
2934 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2936 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2939 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2940 MLX5_FDB_TO_VPORT_REG_C_0))
2946 #define MLX5_ESW_METADATA_RSVD_UPLINK 1
2948 /* Share the same metadata for uplinks. This is fine because:
2949 * (a) In shared FDB mode (LAG) both uplinks are treated the
2950 * same and tagged with the same metadata.
2951 * (b) In non-shared FDB mode, packets from physical port0
2952 * cannot hit the eswitch of PF1 and vice versa.
2954 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
2956 return MLX5_ESW_METADATA_RSVD_UPLINK;
2959 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
2961 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
2962 /* Reserve 0xf for internal port offload */
2963 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
2967 /* Only 4 bits of pf_num */
2968 pf_num = mlx5_get_dev_index(esw->dev);
2969 if (pf_num > max_pf_num)
2972 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
2973 /* Use only non-zero vport_id (2-4095) for all PFs */
2974 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
2975 MLX5_ESW_METADATA_RSVD_UPLINK + 1,
2976 vport_end_ida, GFP_KERNEL);
2979 id = (pf_num << ESW_VPORT_BITS) | id;
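/*
 * Worked example (annotation, using the 4/12 bit split described above): on
 * PF index 1, an IDA id of 5 yields
 *
 *	metadata = (1 << ESW_VPORT_BITS) | 5 = 0x1005
 *
 * assuming ESW_VPORT_BITS is 12. Id 0 stays invalid and id 1
 * (MLX5_ESW_METADATA_RSVD_UPLINK) is reserved for the uplinks.
 */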
2983 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
2985 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
2987 /* Metadata contains only 12 bits of the actual IDA id */
2988 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
2991 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
2992 struct mlx5_vport *vport)
2994 if (vport->vport == MLX5_VPORT_UPLINK)
2995 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
2997 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
2999 vport->metadata = vport->default_metadata;
3000 return vport->metadata ? 0 : -ENOSPC;
3003 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
3004 struct mlx5_vport *vport)
3006 if (!vport->default_metadata)
3009 if (vport->vport == MLX5_VPORT_UPLINK)
3012 WARN_ON(vport->metadata != vport->default_metadata);
3013 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
3016 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
3018 struct mlx5_vport *vport;
3021 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3024 mlx5_esw_for_each_vport(esw, i, vport)
3025 esw_offloads_vport_metadata_cleanup(esw, vport);
3028 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
3030 struct mlx5_vport *vport;
3034 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3037 mlx5_esw_for_each_vport(esw, i, vport) {
3038 err = esw_offloads_vport_metadata_setup(esw, vport);
3046 esw_offloads_metadata_uninit(esw);
3051 static int esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
3052 struct mlx5_vport *vport)
3056 err = esw_acl_ingress_ofld_setup(esw, vport);
3060 err = esw_acl_egress_ofld_setup(esw, vport);
3067 esw_acl_ingress_ofld_cleanup(esw, vport);
3072 static void esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
3073 struct mlx5_vport *vport)
3075 esw_acl_egress_ofld_cleanup(vport);
3076 esw_acl_ingress_ofld_cleanup(esw, vport);
3079 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
3081 struct mlx5_vport *vport;
3083 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3085 return PTR_ERR(vport);
3087 return esw_vport_create_offloads_acl_tables(esw, vport);
3090 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
3092 struct mlx5_vport *vport;
3094 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3098 esw_vport_destroy_offloads_acl_tables(esw, vport);
3101 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
3103 struct mlx5_eswitch_rep *rep;
3107 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3110 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3111 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3114 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3118 mlx5_esw_for_each_rep(esw, i, rep) {
3119 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3120 mlx5_esw_offloads_rep_load(esw, rep->vport);
3126 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3128 struct mlx5_esw_indir_table *indir;
3131 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3132 mutex_init(&esw->fdb_table.offloads.vports.lock);
3133 hash_init(esw->fdb_table.offloads.vports.table);
3134 atomic64_set(&esw->user_count, 0);
3136 indir = mlx5_esw_indir_table_init();
3137 if (IS_ERR(indir)) {
3138 err = PTR_ERR(indir);
3139 goto create_indir_err;
3141 esw->fdb_table.offloads.indir = indir;
3143 err = esw_create_uplink_offloads_acl_tables(esw);
3145 goto create_acl_err;
3147 err = esw_create_offloads_table(esw);
3149 goto create_offloads_err;
3151 err = esw_create_restore_table(esw);
3153 goto create_restore_err;
3155 err = esw_create_offloads_fdb_tables(esw);
3157 goto create_fdb_err;
3159 err = esw_create_vport_rx_group(esw);
3163 err = esw_create_vport_rx_drop_group(esw);
3165 goto create_rx_drop_fg_err;
3167 err = esw_create_vport_rx_drop_rule(esw);
3169 goto create_rx_drop_rule_err;
3173 create_rx_drop_rule_err:
3174 esw_destroy_vport_rx_drop_group(esw);
3175 create_rx_drop_fg_err:
3176 esw_destroy_vport_rx_group(esw);
3178 esw_destroy_offloads_fdb_tables(esw);
3180 esw_destroy_restore_table(esw);
3182 esw_destroy_offloads_table(esw);
3183 create_offloads_err:
3184 esw_destroy_uplink_offloads_acl_tables(esw);
3186 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3188 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3192 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3194 esw_destroy_vport_rx_drop_rule(esw);
3195 esw_destroy_vport_rx_drop_group(esw);
3196 esw_destroy_vport_rx_group(esw);
3197 esw_destroy_offloads_fdb_tables(esw);
3198 esw_destroy_restore_table(esw);
3199 esw_destroy_offloads_table(esw);
3200 esw_destroy_uplink_offloads_acl_tables(esw);
3201 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3202 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3206 static void esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
3208 struct devlink *devlink;
3209 bool host_pf_disabled;
3212 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3213 host_params_context.host_num_of_vfs);
3214 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3215 host_params_context.host_pf_disabled);
3217 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3220 devlink = priv_to_devlink(esw->dev);
3222 /* Number of VFs can only change from "0 to x" or "x to 0". */
3223 if (esw->esw_funcs.num_vfs > 0) {
3224 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3228 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3229 MLX5_VPORT_UC_ADDR_CHANGE);
3231 devl_unlock(devlink);
3235 esw->esw_funcs.num_vfs = new_num_vfs;
3236 devl_unlock(devlink);
3239 static void esw_functions_changed_event_handler(struct work_struct *work)
3241 struct mlx5_host_work *host_work;
3242 struct mlx5_eswitch *esw;
3245 host_work = container_of(work, struct mlx5_host_work, work);
3246 esw = host_work->esw;
3248 out = mlx5_esw_query_functions(esw->dev);
3252 esw_vfs_changed_event_handler(esw, out);
3258 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3260 struct mlx5_esw_functions *esw_funcs;
3261 struct mlx5_host_work *host_work;
3262 struct mlx5_eswitch *esw;
3264 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3268 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3269 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3271 host_work->esw = esw;
3273 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3274 queue_work(esw->work_queue, &host_work->work);
3279 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3281 const u32 *query_host_out;
3283 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3286 query_host_out = mlx5_esw_query_functions(esw->dev);
3287 if (IS_ERR(query_host_out))
3288 return PTR_ERR(query_host_out);
3290 /* Mark a non-local controller with a non-zero controller number. */
3291 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3292 host_params_context.host_number);
3293 kvfree(query_host_out);
3297 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3299 /* Local controller is always valid */
3300 if (controller == 0)
3303 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3306 /* The external host number starts at zero in the device */
3307 return (controller == esw->offloads.host_number + 1);
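/*
 * Example (annotation, not in the original source): if the device reports
 * host_number 0 for the external host, the valid controller numbers are 0
 * (the local controller) and 1 (host_number + 1); anything else is rejected.
 */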
3310 int esw_offloads_enable(struct mlx5_eswitch *esw)
3312 struct mapping_ctx *reg_c0_obj_pool;
3313 struct mlx5_vport *vport;
3318 mutex_init(&esw->offloads.termtbl_mutex);
3319 mlx5_rdma_enable_roce(esw->dev);
3321 err = mlx5_esw_host_number_init(esw);
3325 err = esw_offloads_metadata_init(esw);
3329 err = esw_set_passing_vport_metadata(esw, true);
3331 goto err_vport_metadata;
3333 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3335 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
3336 sizeof(struct mlx5_mapped_obj),
3337 ESW_REG_C0_USER_DATA_METADATA_MASK, true);
3340 if (IS_ERR(reg_c0_obj_pool)) {
3341 err = PTR_ERR(reg_c0_obj_pool);
3344 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3346 err = esw_offloads_steering_init(esw);
3348 goto err_steering_init;
3350 /* Representor will control the vport link state */
3351 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3352 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3353 if (mlx5_core_ec_sriov_enabled(esw->dev))
3354 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
3355 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3357 /* Uplink vport rep must load first. */
3358 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
3362 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3369 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3371 esw_offloads_steering_cleanup(esw);
3373 mapping_destroy(reg_c0_obj_pool);
3375 esw_set_passing_vport_metadata(esw, false);
3377 esw_offloads_metadata_uninit(esw);
3379 mlx5_rdma_disable_roce(esw->dev);
3380 mutex_destroy(&esw->offloads.termtbl_mutex);
3384 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3385 struct netlink_ext_ack *extack)
3389 esw->mode = MLX5_ESWITCH_LEGACY;
3391 /* If changing from switchdev to legacy mode without SR-IOV enabled,
3392 * there is no need to create a legacy FDB.
3394 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
3397 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3399 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3404 void esw_offloads_disable(struct mlx5_eswitch *esw)
3406 mlx5_eswitch_disable_pf_vf_vports(esw);
3407 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3408 esw_set_passing_vport_metadata(esw, false);
3409 esw_offloads_steering_cleanup(esw);
3410 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3411 esw_offloads_metadata_uninit(esw);
3412 mlx5_rdma_disable_roce(esw->dev);
3413 mutex_destroy(&esw->offloads.termtbl_mutex);
3416 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
3419 case DEVLINK_ESWITCH_MODE_LEGACY:
3420 *mlx5_mode = MLX5_ESWITCH_LEGACY;
3422 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3423 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
3432 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
3434 switch (mlx5_mode) {
3435 case MLX5_ESWITCH_LEGACY:
3436 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3438 case MLX5_ESWITCH_OFFLOADS:
3439 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3448 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3451 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3452 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3454 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3455 *mlx5_mode = MLX5_INLINE_MODE_L2;
3457 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3458 *mlx5_mode = MLX5_INLINE_MODE_IP;
3460 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3461 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3470 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3472 switch (mlx5_mode) {
3473 case MLX5_INLINE_MODE_NONE:
3474 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3476 case MLX5_INLINE_MODE_L2:
3477 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3479 case MLX5_INLINE_MODE_IP:
3480 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3482 case MLX5_INLINE_MODE_TCP_UDP:
3483 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3492 static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
3494 struct net *devl_net, *netdev_net;
3495 struct mlx5_eswitch *esw;
3497 esw = mlx5_devlink_eswitch_get(devlink);
3498 netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
3499 devl_net = devlink_net(devlink);
3501 return net_eq(devl_net, netdev_net);
3504 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3505 struct netlink_ext_ack *extack)
3507 u16 cur_mlx5_mode, mlx5_mode = 0;
3508 struct mlx5_eswitch *esw;
3511 esw = mlx5_devlink_eswitch_get(devlink);
3513 return PTR_ERR(esw);
3515 if (esw_mode_from_devlink(mode, &mlx5_mode))
3518 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
3519 !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
3520 NL_SET_ERR_MSG_MOD(extack,
3521 "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
3525 mlx5_lag_disable_change(esw->dev);
3526 err = mlx5_esw_try_lock(esw);
3528 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
3531 cur_mlx5_mode = err;
3534 if (cur_mlx5_mode == mlx5_mode)
3537 mlx5_eswitch_disable_locked(esw);
3538 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
3539 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3540 NL_SET_ERR_MSG_MOD(extack,
3541 "Can't change mode while devlink traps are active");
3545 err = esw_offloads_start(esw, extack);
3546 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
3547 err = esw_offloads_stop(esw, extack);
3548 mlx5_rescan_drivers(esw->dev);
3554 mlx5_esw_unlock(esw);
3556 mlx5_lag_enable_change(esw->dev);
3560 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3562 struct mlx5_eswitch *esw;
3565 esw = mlx5_devlink_eswitch_get(devlink);
3567 return PTR_ERR(esw);
3569 down_read(&esw->mode_lock);
3570 err = esw_mode_to_devlink(esw->mode, mode);
3571 up_read(&esw->mode_lock);
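/*
 * Usage sketch (assumed iproute2 invocation, not from the original source):
 * the get/set pair above backs commands along the lines of
 *
 *	devlink dev eswitch show pci/0000:03:00.0
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *
 * with the set path refusing to run while the E-Switch is busy, while
 * devlink traps are active, or when the uplink netdev's net namespace has
 * diverged from the devlink instance's.
 */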
3575 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3576 struct netlink_ext_ack *extack)
3578 struct mlx5_core_dev *dev = esw->dev;
3579 struct mlx5_vport *vport;
3580 u16 err_vport_num = 0;
3584 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3585 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3587 err_vport_num = vport->vport;
3588 NL_SET_ERR_MSG_MOD(extack,
3589 "Failed to set min inline on vport");
3590 goto revert_inline_mode;
3593 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
3594 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3595 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3597 err_vport_num = vport->vport;
3598 NL_SET_ERR_MSG_MOD(extack,
3599 "Failed to set min inline on vport");
3600 goto revert_ec_vf_inline_mode;
3606 revert_ec_vf_inline_mode:
3607 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3608 if (vport->vport == err_vport_num)
3610 mlx5_modify_nic_vport_min_inline(dev,
3612 esw->offloads.inline_mode);
3615 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3616 if (vport->vport == err_vport_num)
3618 mlx5_modify_nic_vport_min_inline(dev,
3620 esw->offloads.inline_mode);
3625 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3626 struct netlink_ext_ack *extack)
3628 struct mlx5_core_dev *dev = devlink_priv(devlink);
3629 struct mlx5_eswitch *esw;
3633 esw = mlx5_devlink_eswitch_get(devlink);
3635 return PTR_ERR(esw);
3637 down_write(&esw->mode_lock);
3639 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
3640 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
3641 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
3647 case MLX5_CAP_INLINE_MODE_L2:
3648 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
3651 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3655 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3656 NL_SET_ERR_MSG_MOD(extack,
3657 "Can't set inline mode when flows are configured");
3662 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
3666 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3670 esw->offloads.inline_mode = mlx5_mode;
3671 up_write(&esw->mode_lock);
3675 up_write(&esw->mode_lock);
3679 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3681 struct mlx5_eswitch *esw;
3684 esw = mlx5_devlink_eswitch_get(devlink);
3686 return PTR_ERR(esw);
3688 down_read(&esw->mode_lock);
3689 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3690 up_read(&esw->mode_lock);
3694 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
3696 struct devlink *devlink = priv_to_devlink(dev);
3697 struct mlx5_eswitch *esw;
3700 esw = mlx5_devlink_eswitch_get(devlink);
3702 devl_unlock(devlink);
3703 /* Failure means no eswitch => not possible to change encap */
3707 down_write(&esw->mode_lock);
3708 if (esw->mode != MLX5_ESWITCH_LEGACY &&
3709 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
3710 up_write(&esw->mode_lock);
3711 devl_unlock(devlink);
3715 esw->offloads.num_block_encap++;
3716 up_write(&esw->mode_lock);
3717 devl_unlock(devlink);
3721 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
3723 struct devlink *devlink = priv_to_devlink(dev);
3724 struct mlx5_eswitch *esw;
3726 esw = mlx5_devlink_eswitch_get(devlink);
3730 down_write(&esw->mode_lock);
3731 esw->offloads.num_block_encap--;
3732 up_write(&esw->mode_lock);
3735 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3736 enum devlink_eswitch_encap_mode encap,
3737 struct netlink_ext_ack *extack)
3739 struct mlx5_core_dev *dev = devlink_priv(devlink);
3740 struct mlx5_eswitch *esw;
3743 esw = mlx5_devlink_eswitch_get(devlink);
3745 return PTR_ERR(esw);
3747 down_write(&esw->mode_lock);
3749 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
3750 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
3751 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
3756 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
3761 if (esw->mode == MLX5_ESWITCH_LEGACY) {
3762 esw->offloads.encap = encap;
3766 if (esw->offloads.encap == encap)
3769 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3770 NL_SET_ERR_MSG_MOD(extack,
3771 "Can't set encapsulation when flows are configured");
3776 if (esw->offloads.num_block_encap) {
3777 NL_SET_ERR_MSG_MOD(extack,
3778 "Can't set encapsulation when IPsec SA and/or policies are configured");
3783 esw_destroy_offloads_fdb_tables(esw);
3785 esw->offloads.encap = encap;
3787 err = esw_create_offloads_fdb_tables(esw);
3790 NL_SET_ERR_MSG_MOD(extack,
3791 "Failed re-creating fast FDB table");
3792 esw->offloads.encap = !encap;
3793 (void)esw_create_offloads_fdb_tables(esw);
3797 up_write(&esw->mode_lock);
3801 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
3802 enum devlink_eswitch_encap_mode *encap)
3804 struct mlx5_eswitch *esw;
3806 esw = mlx5_devlink_eswitch_get(devlink);
3808 return PTR_ERR(esw);
3810 down_read(&esw->mode_lock);
3811 *encap = esw->offloads.encap;
3812 up_read(&esw->mode_lock);
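/*
 * Usage sketch (assumed iproute2 invocation, not from the original source):
 * encap mode is typically toggled with something like
 *
 *	devlink dev eswitch set pci/0000:03:00.0 encap-mode basic
 *
 * which the set handler above only honours when the device supports FDB
 * reformat/decap, no offloaded flows exist, and no IPsec SAs or policies
 * currently block encap changes.
 */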
3817 static bool mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
3819 /* Currently, only an ECPF-based device has a representor for the host PF. */
3820 if (vport_num == MLX5_VPORT_PF &&
3821 !mlx5_core_is_ecpf_esw_manager(esw->dev))
3824 if (vport_num == MLX5_VPORT_ECPF &&
3825 !mlx5_ecpf_vport_exists(esw->dev))
3831 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
3832 const struct mlx5_eswitch_rep_ops *ops,
3835 struct mlx5_eswitch_rep_data *rep_data;
3836 struct mlx5_eswitch_rep *rep;
3839 esw->offloads.rep_ops[rep_type] = ops;
3840 mlx5_esw_for_each_rep(esw, i, rep) {
3841 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
3843 rep_data = &rep->rep_data[rep_type];
3844 atomic_set(&rep_data->state, REP_REGISTERED);
3848 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
3850 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
3852 struct mlx5_eswitch_rep *rep;
3855 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
3856 __unload_reps_all_vport(esw, rep_type);
3858 mlx5_esw_for_each_rep(esw, i, rep)
3859 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
3861 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
3863 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
3865 struct mlx5_eswitch_rep *rep;
3867 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3868 return rep->rep_data[rep_type].priv;
3871 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
3875 struct mlx5_eswitch_rep *rep;
3877 rep = mlx5_eswitch_get_rep(esw, vport);
3879 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
3880 esw->offloads.rep_ops[rep_type]->get_proto_dev)
3881 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
3884 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
3886 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
3888 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
3890 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
3892 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
3895 return mlx5_eswitch_get_rep(esw, vport);
3897 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
3899 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
3901 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
3903 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
3905 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
3907 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
3909 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
3911 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
3914 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3916 if (WARN_ON_ONCE(IS_ERR(vport)))
3919 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
3921 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
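/*
 * Worked example (annotation): continuing the 0x1005 metadata example above,
 * with ESW_SOURCE_PORT_METADATA_BITS == 16 (4 PFNUM + 12 vport-id bits) the
 * match value becomes
 *
 *	0x1005 << (32 - 16) = 0x10050000
 *
 * i.e. the source-port metadata occupies the upper 16 bits of reg_c_0,
 * leaving the lower bits for the user-data/mapped-object id matched by the
 * restore table.
 */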
3923 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
3924 u16 vport_num, u32 controller, u32 sfnum)
3928 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
3932 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
3936 err = mlx5_esw_offloads_rep_load(esw, vport_num);
3942 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3944 mlx5_esw_vport_disable(esw, vport_num);
3948 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
3950 mlx5_esw_offloads_rep_unload(esw, vport_num);
3951 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3952 mlx5_esw_vport_disable(esw, vport_num);
3955 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
3957 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3964 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
3968 err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
3972 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
3973 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
3980 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
3982 u16 *old_entry, *vhca_map_entry, vhca_id;
3985 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3987 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3992 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
3993 if (!vhca_map_entry)
3996 *vhca_map_entry = vport_num;
3997 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
3998 if (xa_is_err(old_entry)) {
3999 kfree(vhca_map_entry);
4000 return xa_err(old_entry);
4006 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
4008 u16 *vhca_map_entry, vhca_id;
4011 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
4013 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
4016 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
4017 kfree(vhca_map_entry);
4020 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
4022 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
4031 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
4034 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4036 if (WARN_ON_ONCE(IS_ERR(vport)))
4039 return vport->metadata;
4041 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
4044 static bool is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
4046 return vport_num == MLX5_VPORT_PF ||
4047 mlx5_eswitch_is_vf_vport(esw, vport_num) ||
4048 mlx5_esw_is_sf_vport(esw, vport_num);
4051 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
4052 u8 *hw_addr, int *hw_addr_len,
4053 struct netlink_ext_ack *extack)
4055 struct mlx5_eswitch *esw;
4056 struct mlx5_vport *vport;
4059 esw = mlx5_devlink_eswitch_get(port->devlink);
4061 return PTR_ERR(esw);
4063 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4064 if (!is_port_function_supported(esw, vport_num))
4067 vport = mlx5_eswitch_get_vport(esw, vport_num);
4068 if (IS_ERR(vport)) {
4069 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4070 return PTR_ERR(vport);
4073 mutex_lock(&esw->state_lock);
4074 ether_addr_copy(hw_addr, vport->info.mac);
4075 *hw_addr_len = ETH_ALEN;
4076 mutex_unlock(&esw->state_lock);
4080 int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
4081 const u8 *hw_addr, int hw_addr_len,
4082 struct netlink_ext_ack *extack)
4084 struct mlx5_eswitch *esw;
4087 esw = mlx5_devlink_eswitch_get(port->devlink);
4089 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
4090 return PTR_ERR(esw);
4093 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4094 if (!is_port_function_supported(esw, vport_num)) {
4095 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
4099 return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
4102 static struct mlx5_vport *
4103 mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
4107 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
4108 return ERR_PTR(-EOPNOTSUPP);
4110 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4111 if (!is_port_function_supported(esw, vport_num))
4112 return ERR_PTR(-EOPNOTSUPP);
4114 return mlx5_eswitch_get_vport(esw, vport_num);
4117 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
4118 struct netlink_ext_ack *extack)
4120 struct mlx5_eswitch *esw;
4121 struct mlx5_vport *vport;
4122 int err = -EOPNOTSUPP;
4124 esw = mlx5_devlink_eswitch_get(port->devlink);
4126 return PTR_ERR(esw);
4128 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4129 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4133 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4134 if (IS_ERR(vport)) {
4135 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4136 return PTR_ERR(vport);
4139 mutex_lock(&esw->state_lock);
4140 if (vport->enabled) {
4141 *is_enabled = vport->info.mig_enabled;
4144 mutex_unlock(&esw->state_lock);
4148 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
4149 struct netlink_ext_ack *extack)
4151 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4152 struct mlx5_eswitch *esw;
4153 struct mlx5_vport *vport;
4156 int err = -EOPNOTSUPP;
4158 esw = mlx5_devlink_eswitch_get(port->devlink);
4160 return PTR_ERR(esw);
4162 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4163 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4167 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4168 if (IS_ERR(vport)) {
4169 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4170 return PTR_ERR(vport);
4173 mutex_lock(&esw->state_lock);
4174 if (!vport->enabled) {
4175 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4179 if (vport->info.mig_enabled == enable) {
4184 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4190 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
4191 MLX5_CAP_GENERAL_2);
4193 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4197 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4198 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
4200 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
4201 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
4203 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
4207 vport->info.mig_enabled = enable;
4212 mutex_unlock(&esw->state_lock);
4216 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
4217 struct netlink_ext_ack *extack)
4219 struct mlx5_eswitch *esw;
4220 struct mlx5_vport *vport;
4221 int err = -EOPNOTSUPP;
4223 esw = mlx5_devlink_eswitch_get(port->devlink);
4225 return PTR_ERR(esw);
4227 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4228 if (IS_ERR(vport)) {
4229 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4230 return PTR_ERR(vport);
4233 mutex_lock(&esw->state_lock);
4234 if (vport->enabled) {
4235 *is_enabled = vport->info.roce_enabled;
4238 mutex_unlock(&esw->state_lock);
4242 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
4243 struct netlink_ext_ack *extack)
4245 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4246 struct mlx5_eswitch *esw;
4247 struct mlx5_vport *vport;
4248 int err = -EOPNOTSUPP;
4253 esw = mlx5_devlink_eswitch_get(port->devlink);
4255 return PTR_ERR(esw);
4257 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4258 if (IS_ERR(vport)) {
4259 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4260 return PTR_ERR(vport);
4262 vport_num = vport->vport;
4264 mutex_lock(&esw->state_lock);
4265 if (!vport->enabled) {
4266 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4270 if (vport->info.roce_enabled == enable) {
4275 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4281 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, MLX5_CAP_GENERAL);
4284 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4288 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4289 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
4291 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
4292 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
4294 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
4298 vport->info.roce_enabled = enable;
4303 mutex_unlock(&esw->state_lock);
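/*
 * Annotation (hedged summary, not in the original source): the migratable
 * and roce setters above follow the same query/modify/set pattern for
 * another function's HCA caps:
 *
 *	query_ctx = kzalloc(MLX5_ST_SZ_BYTES(query_hca_cap_out), GFP_KERNEL);
 *	mlx5_vport_get_other_func_cap(dev, vport, query_ctx, <cap type>);
 *	MLX5_SET(<cap struct>, hca_caps, <field>, enable);
 *	mlx5_vport_set_other_func_cap(dev, hca_caps, vport, <op mod>);
 *
 * all under esw->state_lock so the cached vport->info flags stay in sync
 * with the device.
 */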