1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
4 #include <linux/build_bug.h>
5 #include <linux/list.h>
6 #include <linux/notifier.h>
7 #include <net/netevent.h>
8 #include <net/switchdev.h>
9 #include "lib/devcom.h"
12 #include "bridge_priv.h"
13 #define CREATE_TRACE_POINTS
14 #include "diag/bridge_tracepoint.h"
/* NOTE(review): this file appears to be a lossy extract — each line is
 * prefixed with its original line number and some lines (braces, blank
 * lines, a few statements) are missing. Comments below describe only what
 * the visible code establishes.
 *
 * rhashtable parameters for the per-bridge FDB table: entries are
 * struct mlx5_esw_bridge_fdb_entry, keyed by their 'key' field and linked
 * via 'ht_node'; the table may shrink automatically as entries are removed.
 * NOTE(review): the closing "};" line is missing from this extract.
 */
16 static const struct rhashtable_params fdb_ht_params = {
17 .key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
18 .key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
19 .head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
20 .automatic_shrinking = true,
/* Emit a switchdev FDB notifier event for (dev, addr, vid), marking the
 * entry as hardware-offloaded (send_info.offloaded = true).
 * NOTE(review): the 'val' notifier-type parameter line and the
 * send_info.vid assignment are not visible in this extract — confirm
 * against the full source.
 */
24 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
27 struct switchdev_notifier_fdb_info send_info = {};
29 send_info.addr = addr;
31 send_info.offloaded = true;
32 call_switchdev_notifiers(val, dev, &send_info.info, NULL);
/* Notify the bridge that an offloaded FDB entry was deleted
 * (SWITCHDEV_FDB_DEL_TO_BRIDGE) — but only for entries the driver itself
 * learned: user-added and peer-owned entries are skipped, since the
 * bridge/peer already owns their lifecycle.
 */
36 mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
38 if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
39 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
41 SWITCHDEV_FDB_DEL_TO_BRIDGE);
/* Return true when the device can pop a VLAN header via the REMOVE_HEADER
 * packet-reformat: it must support removal of at least sizeof(struct
 * vlan_hdr) bytes at an offset reaching the VLAN header position
 * (offsetof(struct vlan_ethhdr, h_vlan_proto), i.e. right after the MACs).
 * NOTE(review): BIT(cap) is non-zero for cap values 0 and 1, so the first
 * operand looks always-true if 'reformat_remove' is a single-bit cap —
 * verify whether plain MLX5_CAP_...(reformat_remove) was intended.
 */
44 static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
46 return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
47 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
48 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
49 offsetof(struct vlan_ethhdr, h_vlan_proto);
/* Allocate a REMOVE_HEADER packet-reformat object that strips one VLAN
 * header: remove sizeof(struct vlan_hdr) bytes anchored at the MAC start,
 * at the offset where the VLAN tag sits (after dst/src MAC). Returns the
 * reformat handle or an ERR_PTR from mlx5_packet_reformat_alloc().
 */
52 static struct mlx5_pkt_reformat *
53 mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
55 struct mlx5_pkt_reformat_params reformat_params = {};
57 reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
58 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
59 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
60 reformat_params.size = sizeof(struct vlan_hdr);
61 return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
/* Create a bridge flow table of 'max_fte' entries at 'level' in the FDB
 * namespace, priority FDB_BR_OFFLOAD, with tunnel-reformat enabled.
 * Returns the table or ERR_PTR(-ENOENT) when the FDB namespace is absent.
 * NOTE(review): the "if (!ns)" guard and the error-path/return lines are
 * missing from this extract — the warnings below belong to those paths.
 */
64 struct mlx5_flow_table *
65 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
67 struct mlx5_flow_table_attr ft_attr = {};
68 struct mlx5_core_dev *dev = esw->dev;
69 struct mlx5_flow_namespace *ns;
70 struct mlx5_flow_table *fdb;
72 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
74 esw_warn(dev, "Failed to get FDB namespace\n");
75 return ERR_PTR(-ENOENT);
78 ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
79 ft_attr.max_fte = max_fte;
80 ft_attr.level = level;
81 ft_attr.prio = FDB_BR_OFFLOAD;
82 fdb = mlx5_create_flow_table(ns, &ft_attr);
84 esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
/* Create the ingress flow group for VLAN-tagged traffic of the given
 * protocol (802.1Q -> cvlan_tag, 802.1ad -> svlan_tag), occupying flow
 * indices [from, to]. Matches: source MAC, the tag-present bit, first VID,
 * and vport metadata in reg_c_0. Returns the group or ERR_PTR; 'in' is a
 * kvzalloc'd create_flow_group_in blob (freed on the paths not visible in
 * this extract).
 */
89 static struct mlx5_flow_group *
90 mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
91 struct mlx5_eswitch *esw,
92 struct mlx5_flow_table *ingress_ft)
94 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
95 struct mlx5_flow_group *fg;
98 in = kvzalloc(inlen, GFP_KERNEL);
100 return ERR_PTR(-ENOMEM);
102 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
103 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
104 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
106 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
107 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
108 if (vlan_proto == ETH_P_8021Q)
109 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
110 else if (vlan_proto == ETH_P_8021AD)
111 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
112 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
114 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
115 mlx5_eswitch_get_vport_metadata_mask());
117 MLX5_SET(create_flow_group_in, in, start_flow_index, from);
118 MLX5_SET(create_flow_group_in, in, end_flow_index, to);
120 fg = mlx5_create_flow_group(ingress_ft, in);
124 "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
125 vlan_proto, PTR_ERR(fg));
/* Ingress flow group for 802.1Q-tagged traffic, at the VLAN group's
 * reserved index range.
 */
130 static struct mlx5_flow_group *
131 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
132 struct mlx5_flow_table *ingress_ft)
134 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
135 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
137 return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
/* Ingress flow group for 802.1ad (QinQ) tagged traffic, at the QinQ
 * group's reserved index range.
 */
140 static struct mlx5_flow_group *
141 mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
142 struct mlx5_flow_table *ingress_ft)
144 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
145 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
147 return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
/* Create the ingress VLAN "filter" flow group: like the VLAN group above
 * but WITHOUT a first_vid match — it matches source MAC + tag-present bit
 * (cvlan/svlan per 'vlan_proto') + vport metadata only, catching tagged
 * packets regardless of VID. Occupies flow indices [from, to].
 */
151 static struct mlx5_flow_group *
152 mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
153 u16 vlan_proto, struct mlx5_eswitch *esw,
154 struct mlx5_flow_table *ingress_ft)
156 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
157 struct mlx5_flow_group *fg;
160 in = kvzalloc(inlen, GFP_KERNEL);
162 return ERR_PTR(-ENOMEM);
164 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
165 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
166 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
168 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
169 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
170 if (vlan_proto == ETH_P_8021Q)
171 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
172 else if (vlan_proto == ETH_P_8021AD)
173 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
174 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
175 mlx5_eswitch_get_vport_metadata_mask());
177 MLX5_SET(create_flow_group_in, in, start_flow_index, from);
178 MLX5_SET(create_flow_group_in, in, end_flow_index, to);
180 fg = mlx5_create_flow_group(ingress_ft, in);
183 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
/* Ingress VLAN-filter flow group for 802.1Q, at its reserved index range. */
189 static struct mlx5_flow_group *
190 mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
191 struct mlx5_flow_table *ingress_ft)
193 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
194 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
196 return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
/* Ingress VLAN-filter flow group for 802.1ad (QinQ), at its reserved index
 * range.
 */
200 static struct mlx5_flow_group *
201 mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
202 struct mlx5_flow_table *ingress_ft)
204 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
205 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
207 return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
/* Create the ingress MAC flow group: matches source MAC + vport metadata
 * only (no VLAN fields) — handles untagged traffic. Uses the MAC group's
 * reserved index range.
 */
211 static struct mlx5_flow_group *
212 mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
214 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
215 struct mlx5_flow_group *fg;
218 in = kvzalloc(inlen, GFP_KERNEL);
220 return ERR_PTR(-ENOMEM);
222 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
223 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
224 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
226 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
227 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
229 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
230 mlx5_eswitch_get_vport_metadata_mask());
232 MLX5_SET(create_flow_group_in, in, start_flow_index,
233 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
234 MLX5_SET(create_flow_group_in, in, end_flow_index,
235 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
237 fg = mlx5_create_flow_group(ingress_ft, in);
240 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
/* Create the egress flow group for VLAN-tagged traffic of 'vlan_proto':
 * matches destination MAC + tag-present bit + first VID (outer headers
 * only — no metadata match on egress). Occupies flow indices [from, to].
 */
247 static struct mlx5_flow_group *
248 mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
249 struct mlx5_eswitch *esw,
250 struct mlx5_flow_table *egress_ft)
252 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
253 struct mlx5_flow_group *fg;
256 in = kvzalloc(inlen, GFP_KERNEL);
258 return ERR_PTR(-ENOMEM);
260 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
261 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
263 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
264 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
265 if (vlan_proto == ETH_P_8021Q)
266 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
267 else if (vlan_proto == ETH_P_8021AD)
268 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
269 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
271 MLX5_SET(create_flow_group_in, in, start_flow_index, from);
272 MLX5_SET(create_flow_group_in, in, end_flow_index, to);
274 fg = mlx5_create_flow_group(egress_ft, in);
277 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
/* Egress flow group for 802.1Q-tagged traffic, at its reserved index range. */
283 static struct mlx5_flow_group *
284 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
286 unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
287 unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
289 return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
/* Egress flow group for 802.1ad (QinQ) tagged traffic, at its reserved
 * index range.
 */
292 static struct mlx5_flow_group *
293 mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
294 struct mlx5_flow_table *egress_ft)
296 unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
297 unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
299 return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
/* Create the egress MAC flow group: matches destination MAC only —
 * forwards untagged (or VLAN-agnostic) traffic. Uses the MAC group's
 * reserved index range.
 */
302 static struct mlx5_flow_group *
303 mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
305 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
306 struct mlx5_flow_group *fg;
309 in = kvzalloc(inlen, GFP_KERNEL);
311 return ERR_PTR(-ENOMEM);
313 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
314 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
316 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
317 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
319 MLX5_SET(create_flow_group_in, in, start_flow_index,
320 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
321 MLX5_SET(create_flow_group_in, in, end_flow_index,
322 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);
324 fg = mlx5_create_flow_group(egress_ft, in);
327 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
/* Create the egress "miss" flow group: matches only metadata reg_c_1
 * (masked by ESW_TUN_MASK) — used to catch packets carrying the
 * ingress-set push-VLAN mark so the pushed tag can be undone on miss.
 * Uses the MISS group's reserved index range.
 */
333 static struct mlx5_flow_group *
334 mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
336 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
337 struct mlx5_flow_group *fg;
340 in = kvzalloc(inlen, GFP_KERNEL);
342 return ERR_PTR(-ENOMEM);
344 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
345 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
347 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
349 MLX5_SET(create_flow_group_in, in, start_flow_index,
350 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
351 MLX5_SET(create_flow_group_in, in, end_flow_index,
352 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);
354 fg = mlx5_create_flow_group(egress_ft, in);
357 "Failed to create bridge egress table miss flow group (err=%ld)\n",
/* Set up the shared bridge ingress pipeline: the ingress flow table, the
 * "skip" table, and the five ingress flow groups (vlan, vlan-filter, qinq,
 * qinq-filter, mac), storing all handles in br_offloads on success.
 * Requires vport match metadata to be enabled. On any failure the
 * previously created objects are unwound in reverse creation order (the
 * goto labels for the error ladder are missing from this extract; the
 * trailing destroy calls below are that unwind path).
 */
364 mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
366 struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
367 struct mlx5_flow_table *ingress_ft, *skip_ft;
368 struct mlx5_eswitch *esw = br_offloads->esw;
371 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
374 ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
375 MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
377 if (IS_ERR(ingress_ft))
378 return PTR_ERR(ingress_ft);
380 skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
381 MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
383 if (IS_ERR(skip_ft)) {
384 err = PTR_ERR(skip_ft);
388 vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
389 if (IS_ERR(vlan_fg)) {
390 err = PTR_ERR(vlan_fg);
394 vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
395 if (IS_ERR(vlan_filter_fg)) {
396 err = PTR_ERR(vlan_filter_fg);
397 goto err_vlan_filter_fg;
400 qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
401 if (IS_ERR(qinq_fg)) {
402 err = PTR_ERR(qinq_fg);
406 qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
407 if (IS_ERR(qinq_filter_fg)) {
408 err = PTR_ERR(qinq_filter_fg);
409 goto err_qinq_filter_fg;
412 mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
413 if (IS_ERR(mac_fg)) {
414 err = PTR_ERR(mac_fg);
418 br_offloads->ingress_ft = ingress_ft;
419 br_offloads->skip_ft = skip_ft;
420 br_offloads->ingress_vlan_fg = vlan_fg;
421 br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
422 br_offloads->ingress_qinq_fg = qinq_fg;
423 br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
424 br_offloads->ingress_mac_fg = mac_fg;
428 mlx5_destroy_flow_group(qinq_filter_fg);
430 mlx5_destroy_flow_group(qinq_fg);
432 mlx5_destroy_flow_group(vlan_filter_fg);
434 mlx5_destroy_flow_group(vlan_fg);
436 mlx5_destroy_flow_table(skip_ft);
438 mlx5_destroy_flow_table(ingress_ft);
/* Tear down the shared ingress pipeline in strict reverse creation order
 * (groups before their table), NULL-ing each pointer so a later re-init
 * sees a clean state and double-destroy is impossible.
 */
443 mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
445 mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
446 br_offloads->ingress_mac_fg = NULL;
447 mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
448 br_offloads->ingress_qinq_filter_fg = NULL;
449 mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
450 br_offloads->ingress_qinq_fg = NULL;
451 mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
452 br_offloads->ingress_vlan_filter_fg = NULL;
453 mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
454 br_offloads->ingress_vlan_fg = NULL;
455 mlx5_destroy_flow_table(br_offloads->skip_ft);
456 br_offloads->skip_ft = NULL;
457 mlx5_destroy_flow_table(br_offloads->ingress_ft);
458 br_offloads->ingress_ft = NULL;
/* Forward declaration — defined later in this file; needed here because
 * mlx5_esw_bridge_egress_table_init() uses it before its definition.
 */
461 static struct mlx5_flow_handle *
462 mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
463 struct mlx5_flow_table *skip_ft,
464 struct mlx5_pkt_reformat *pkt_reformat);
/* Set up a bridge's egress pipeline: egress table plus vlan/qinq/mac flow
 * groups, and — when the device supports VLAN-pop reformat — a best-effort
 * miss group + REMOVE_HEADER reformat + miss rule that strips a VLAN
 * pushed at ingress before sending the packet to the skip table. Failures
 * in the optional miss path are logged and abandoned (objects created so
 * far for it are released and the pointers left NULL) rather than failing
 * the whole init. All handles are stored on 'bridge' on success; the
 * mandatory-path error ladder (goto labels) is missing from this extract.
 */
467 mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
468 struct mlx5_esw_bridge *bridge)
470 struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
471 struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
472 struct mlx5_flow_handle *miss_handle = NULL;
473 struct mlx5_eswitch *esw = br_offloads->esw;
474 struct mlx5_flow_table *egress_ft;
477 egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
478 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
480 if (IS_ERR(egress_ft))
481 return PTR_ERR(egress_ft);
483 vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
484 if (IS_ERR(vlan_fg)) {
485 err = PTR_ERR(vlan_fg);
489 qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
490 if (IS_ERR(qinq_fg)) {
491 err = PTR_ERR(qinq_fg);
495 mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
496 if (IS_ERR(mac_fg)) {
497 err = PTR_ERR(mac_fg);
501 if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
502 miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
503 if (IS_ERR(miss_fg)) {
504 esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
510 miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
511 if (IS_ERR(miss_pkt_reformat)) {
513 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
514 PTR_ERR(miss_pkt_reformat));
515 miss_pkt_reformat = NULL;
516 mlx5_destroy_flow_group(miss_fg);
521 miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
522 br_offloads->skip_ft,
524 if (IS_ERR(miss_handle)) {
525 esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
526 PTR_ERR(miss_handle));
528 mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
529 miss_pkt_reformat = NULL;
530 mlx5_destroy_flow_group(miss_fg);
537 bridge->egress_ft = egress_ft;
538 bridge->egress_vlan_fg = vlan_fg;
539 bridge->egress_qinq_fg = qinq_fg;
540 bridge->egress_mac_fg = mac_fg;
541 bridge->egress_miss_fg = miss_fg;
542 bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
543 bridge->egress_miss_handle = miss_handle;
547 mlx5_destroy_flow_group(qinq_fg);
549 mlx5_destroy_flow_group(vlan_fg);
551 mlx5_destroy_flow_table(egress_ft);
/* Tear down a bridge's egress pipeline in reverse creation order. The
 * miss handle/reformat/group are optional (NULL when the device lacks
 * VLAN-pop support or their setup failed), hence the NULL checks.
 */
556 mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
558 if (bridge->egress_miss_handle)
559 mlx5_del_flow_rules(bridge->egress_miss_handle);
560 if (bridge->egress_miss_pkt_reformat)
561 mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
562 bridge->egress_miss_pkt_reformat);
563 if (bridge->egress_miss_fg)
564 mlx5_destroy_flow_group(bridge->egress_miss_fg);
565 mlx5_destroy_flow_group(bridge->egress_mac_fg);
566 mlx5_destroy_flow_group(bridge->egress_qinq_fg);
567 mlx5_destroy_flow_group(bridge->egress_vlan_fg);
568 mlx5_destroy_flow_table(bridge->egress_ft);
/* Add an ingress FDB rule for (vport, smac[, vid]) on behalf of eswitch
 * 'esw': match source MAC + the vport metadata in reg_c_0; when a VLAN is
 * given and it carries a push reformat, also push the tag and apply the
 * push-mark modify-header. Forwards to the bridge's egress table and a
 * flow counter ('counter_id'). Returns the rule handle or ERR_PTR.
 * NOTE(review): the vlan->vid match value, closing braces, kvfree of
 * rule_spec and the return are missing from this extract.
 */
571 static struct mlx5_flow_handle *
572 mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
573 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
574 struct mlx5_esw_bridge *bridge,
575 struct mlx5_eswitch *esw)
577 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
578 struct mlx5_flow_act flow_act = {
579 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
580 .flags = FLOW_ACT_NO_APPEND,
582 struct mlx5_flow_destination dests[2] = {};
583 struct mlx5_flow_spec *rule_spec;
584 struct mlx5_flow_handle *handle;
587 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
589 return ERR_PTR(-ENOMEM);
591 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
593 smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
594 outer_headers.smac_47_16);
595 ether_addr_copy(smac_v, addr);
596 smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
597 outer_headers.smac_47_16);
598 eth_broadcast_addr(smac_c);
600 MLX5_SET(fte_match_param, rule_spec->match_criteria,
601 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
602 MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
603 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
605 if (vlan && vlan->pkt_reformat_push) {
606 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
607 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
608 flow_act.pkt_reformat = vlan->pkt_reformat_push;
609 flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
611 if (bridge->vlan_proto == ETH_P_8021Q) {
612 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
613 outer_headers.cvlan_tag);
614 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
615 outer_headers.cvlan_tag);
616 } else if (bridge->vlan_proto == ETH_P_8021AD) {
617 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
618 outer_headers.svlan_tag);
619 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
620 outer_headers.svlan_tag);
622 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
623 outer_headers.first_vid);
624 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
628 dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
629 dests[0].ft = bridge->egress_ft;
630 dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
631 dests[1].counter_id = counter_id;
633 handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
/* Convenience wrapper: create the ingress rule against the bridge's own
 * (local) eswitch.
 */
640 static struct mlx5_flow_handle *
641 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
642 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
643 struct mlx5_esw_bridge *bridge)
645 return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
646 bridge, bridge->br_offloads->esw);
649 static struct mlx5_flow_handle *
650 mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
651 const unsigned char *addr,
652 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
653 struct mlx5_esw_bridge *bridge)
655 struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
656 struct mlx5_eswitch *tmp, *peer_esw = NULL;
657 static struct mlx5_flow_handle *handle;
659 if (!mlx5_devcom_for_each_peer_begin(devcom))
660 return ERR_PTR(-ENODEV);
662 mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
663 if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
670 handle = ERR_PTR(-ENODEV);
674 handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
678 mlx5_devcom_for_each_peer_end(devcom);
/* Add the ingress VLAN "filter" rule for (vport, smac): matches source MAC
 * + vport metadata + tag-present bit for the bridge's VLAN protocol (no
 * VID), and forwards to the skip table — i.e. tagged packets whose VID has
 * no dedicated rule bypass bridge offload. Returns the handle or ERR_PTR.
 * NOTE(review): kvfree of rule_spec and the return statement are missing
 * from this extract.
 */
682 static struct mlx5_flow_handle *
683 mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
684 struct mlx5_esw_bridge *bridge)
686 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
687 struct mlx5_flow_destination dest = {
688 .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
689 .ft = br_offloads->skip_ft,
691 struct mlx5_flow_act flow_act = {
692 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
693 .flags = FLOW_ACT_NO_APPEND,
695 struct mlx5_flow_spec *rule_spec;
696 struct mlx5_flow_handle *handle;
699 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
701 return ERR_PTR(-ENOMEM);
703 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
705 smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
706 outer_headers.smac_47_16);
707 ether_addr_copy(smac_v, addr);
708 smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
709 outer_headers.smac_47_16);
710 eth_broadcast_addr(smac_c);
712 MLX5_SET(fte_match_param, rule_spec->match_criteria,
713 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
714 MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
715 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
717 if (bridge->vlan_proto == ETH_P_8021Q) {
718 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
719 outer_headers.cvlan_tag);
720 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
721 outer_headers.cvlan_tag);
722 } else if (bridge->vlan_proto == ETH_P_8021AD) {
723 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
724 outer_headers.svlan_tag);
725 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
726 outer_headers.svlan_tag);
729 handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
/* Add an egress rule: match destination MAC (+ tag/VID when a VLAN entry
 * is given), pop the VLAN if the entry carries a pop reformat, and forward
 * to the destination vport. Uplink-sourced traffic gets flow_source set to
 * LOCAL_VPORT when the cap allows; on merged-eswitch devices the dest also
 * carries the owner VHCA id. Returns the handle or ERR_PTR.
 * NOTE(review): the "if (vlan)" guard around the pop/VID block, the VID
 * match value, kvfree and return lines are missing from this extract.
 */
735 static struct mlx5_flow_handle *
736 mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
737 struct mlx5_esw_bridge_vlan *vlan,
738 struct mlx5_esw_bridge *bridge)
740 struct mlx5_flow_destination dest = {
741 .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
742 .vport.num = vport_num,
744 struct mlx5_flow_act flow_act = {
745 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
746 .flags = FLOW_ACT_NO_APPEND,
748 struct mlx5_flow_spec *rule_spec;
749 struct mlx5_flow_handle *handle;
752 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
754 return ERR_PTR(-ENOMEM);
756 if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
757 vport_num == MLX5_VPORT_UPLINK)
758 rule_spec->flow_context.flow_source =
759 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
760 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
762 dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
763 outer_headers.dmac_47_16);
764 ether_addr_copy(dmac_v, addr);
765 dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
766 outer_headers.dmac_47_16);
767 eth_broadcast_addr(dmac_c);
770 if (vlan->pkt_reformat_pop) {
771 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
772 flow_act.pkt_reformat = vlan->pkt_reformat_pop;
775 if (bridge->vlan_proto == ETH_P_8021Q) {
776 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
777 outer_headers.cvlan_tag);
778 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
779 outer_headers.cvlan_tag);
780 } else if (bridge->vlan_proto == ETH_P_8021AD) {
781 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
782 outer_headers.svlan_tag);
783 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
784 outer_headers.svlan_tag);
786 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
787 outer_headers.first_vid);
788 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
792 if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
793 dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
794 dest.vport.vhca_id = esw_owner_vhca_id;
796 handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);
/* Add the egress miss rule: packets whose reg_c_1 carries the
 * ingress-push-VLAN mark (matched under ESW_TUN_MASK) get the VLAN popped
 * via 'pkt_reformat' and are forwarded to the skip table — undoing the
 * speculative tag push for traffic that missed the egress FDB.
 * NOTE(review): the dest.ft = skip_ft assignment line, kvfree and return
 * are missing from this extract.
 */
802 static struct mlx5_flow_handle *
803 mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
804 struct mlx5_flow_table *skip_ft,
805 struct mlx5_pkt_reformat *pkt_reformat)
807 struct mlx5_flow_destination dest = {
808 .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
811 struct mlx5_flow_act flow_act = {
812 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
813 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
814 .flags = FLOW_ACT_NO_APPEND,
815 .pkt_reformat = pkt_reformat,
817 struct mlx5_flow_spec *rule_spec;
818 struct mlx5_flow_handle *handle;
820 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
822 return ERR_PTR(-ENOMEM);
824 rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
826 MLX5_SET(fte_match_param, rule_spec->match_criteria,
827 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
828 MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
829 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);
831 handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);
/* Allocate and initialize a bridge object for 'br_netdev': egress tables,
 * FDB rhashtable, MDB state, empty FDB list, default ageing time and
 * 802.1Q protocol; link it into br_offloads->bridges and create its
 * debugfs entry. Error unwinding (visible below: rhashtable destroy,
 * egress cleanup) runs in reverse init order; the goto labels, refcount
 * init and kvfree(bridge) lines are missing from this extract.
 */
837 static struct mlx5_esw_bridge *mlx5_esw_bridge_create(struct net_device *br_netdev,
838 struct mlx5_esw_bridge_offloads *br_offloads)
840 struct mlx5_esw_bridge *bridge;
843 bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
845 return ERR_PTR(-ENOMEM);
847 bridge->br_offloads = br_offloads;
848 err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
852 err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
856 err = mlx5_esw_bridge_mdb_init(bridge);
860 INIT_LIST_HEAD(&bridge->fdb_list);
861 bridge->ifindex = br_netdev->ifindex;
863 bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
864 bridge->vlan_proto = ETH_P_8021Q;
865 list_add(&bridge->list, &br_offloads->bridges);
866 mlx5_esw_bridge_debugfs_init(br_netdev, bridge);
871 rhashtable_destroy(&bridge->fdb_ht);
873 mlx5_esw_bridge_egress_table_cleanup(bridge);
/* Take a reference on the bridge (counterpart of mlx5_esw_bridge_put).
 * NOTE(review): the body (presumably a refcnt increment — see the
 * "--bridge->refcnt" in _put below) is missing from this extract.
 */
879 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
/* Drop a reference; on the last put, fully tear the bridge down (debugfs,
 * egress tables, mcast, list unlink, MDB, FDB hashtable) and — when this
 * was the last bridge — release the shared ingress tables too.
 * NOTE(review): kvfree(bridge) is not visible in this extract.
 */
884 static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
885 struct mlx5_esw_bridge *bridge)
887 if (--bridge->refcnt)
890 mlx5_esw_bridge_debugfs_cleanup(bridge);
891 mlx5_esw_bridge_egress_table_cleanup(bridge);
892 mlx5_esw_bridge_mcast_disable(bridge);
893 list_del(&bridge->list);
894 mlx5_esw_bridge_mdb_cleanup(bridge);
895 rhashtable_destroy(&bridge->fdb_ht);
898 if (list_empty(&br_offloads->bridges))
899 mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
/* Find an existing bridge by netdev ifindex (taking a reference), or
 * create a new one — lazily initializing the shared ingress tables for the
 * first bridge. If creation fails and no bridges remain, the ingress
 * tables are torn down again so state stays balanced.
 */
902 static struct mlx5_esw_bridge *
903 mlx5_esw_bridge_lookup(struct net_device *br_netdev, struct mlx5_esw_bridge_offloads *br_offloads)
905 struct mlx5_esw_bridge *bridge;
909 list_for_each_entry(bridge, &br_offloads->bridges, list) {
910 if (bridge->ifindex == br_netdev->ifindex) {
911 mlx5_esw_bridge_get(bridge);
916 if (!br_offloads->ingress_ft) {
917 int err = mlx5_esw_bridge_ingress_table_init(br_offloads);
923 bridge = mlx5_esw_bridge_create(br_netdev, br_offloads);
924 if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
925 mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
/* Pack (vport_num, esw_owner_vhca_id) into one xarray key: vport in the
 * low 16 bits, owner VHCA id shifted above it.
 */
929 static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
931 return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
/* xarray key for an existing port object (packed vport + owner VHCA id). */
934 unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
936 return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
/* Insert the port into the offloads xarray under its packed key; xa_insert
 * returns -EBUSY if the key is already present.
 */
939 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
940 struct mlx5_esw_bridge_offloads *br_offloads)
942 return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
/* Look up a port by (vport_num, owner VHCA id); NULL if absent. */
945 static struct mlx5_esw_bridge_port *
946 mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
947 struct mlx5_esw_bridge_offloads *br_offloads)
949 return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
/* Remove the port from the offloads xarray (does not free the port). */
953 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
954 struct mlx5_esw_bridge_offloads *br_offloads)
956 xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
/* Resolve the bridge a (vport, owner VHCA id) port is attached to.
 * NOTE(review): the NULL-port check and "return port->bridge" lines are
 * missing from this extract.
 */
959 static struct mlx5_esw_bridge *
960 mlx5_esw_bridge_from_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
961 struct mlx5_esw_bridge_offloads *br_offloads)
963 struct mlx5_esw_bridge_port *port;
965 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
/* Refresh an FDB entry's liveness: trace it and re-announce it to the
 * bridge (SWITCHDEV_FDB_ADD_TO_BRIDGE) so its software ageing timer
 * restarts.
 */
972 static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
974 trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
976 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
978 SWITCHDEV_FDB_ADD_TO_BRIDGE);
/* Destroy one FDB entry: unhash it, delete its egress / optional filter /
 * ingress rules, free its ingress counter, and unlink it from both the
 * per-vlan and per-bridge lists. NOTE(review): the final kvfree(entry) is
 * not visible in this extract.
 */
982 mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
983 struct mlx5_esw_bridge *bridge)
985 trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);
987 rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
988 mlx5_del_flow_rules(entry->egress_handle);
989 if (entry->filter_handle)
990 mlx5_del_flow_rules(entry->filter_handle);
991 mlx5_del_flow_rules(entry->ingress_handle);
992 mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
993 list_del(&entry->vlan_list);
994 list_del(&entry->list);
/* Notify the bridge of the entry's removal (when applicable), then destroy
 * it — the standard removal path.
 */
999 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
1000 struct mlx5_esw_bridge *bridge)
1002 mlx5_esw_bridge_fdb_del_notify(entry);
1003 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
/* Remove every FDB entry of the bridge; _safe iteration because each
 * cleanup unlinks the entry from fdb_list.
 */
1006 static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
1008 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1010 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
1011 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
/* Look up a port's VLAN object by VID; NULL if none configured. */
1014 static struct mlx5_esw_bridge_vlan *
1015 mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
1017 return xa_load(&port->vlans, vid);
/* Allocate an INSERT_HEADER packet-reformat that pushes a VLAN header
 * (proto + this vlan's VID, both in network byte order) right after the
 * MAC addresses, and store it in vlan->pkt_reformat_push. Checks the
 * device's insert-reformat caps first. Returns 0 or a negative errno.
 * NOTE(review): the vlan_hdr struct's second member line, the error return
 * for the unsupported case, and "return 0" are missing from this extract.
 */
1021 mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
1022 struct mlx5_eswitch *esw)
1025 __be16 h_vlan_proto;
1027 } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
1028 struct mlx5_pkt_reformat_params reformat_params = {};
1029 struct mlx5_pkt_reformat *pkt_reformat;
1031 if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
1032 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
1033 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
1034 offsetof(struct vlan_ethhdr, h_vlan_proto)) {
1035 esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
1039 reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
1040 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
1041 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
1042 reformat_params.size = sizeof(vlan_hdr);
1043 reformat_params.data = &vlan_hdr;
1044 pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
1046 MLX5_FLOW_NAMESPACE_FDB);
1047 if (IS_ERR(pkt_reformat)) {
1048 esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
1049 PTR_ERR(pkt_reformat));
1050 return PTR_ERR(pkt_reformat);
1053 vlan->pkt_reformat_push = pkt_reformat;
1058 mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1060 mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
1061 vlan->pkt_reformat_push = NULL;
1065 mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1067 struct mlx5_pkt_reformat *pkt_reformat;
1069 if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
1070 esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
1074 pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
1075 if (IS_ERR(pkt_reformat)) {
1076 esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
1077 PTR_ERR(pkt_reformat));
1078 return PTR_ERR(pkt_reformat);
1081 vlan->pkt_reformat_pop = pkt_reformat;
1086 mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1088 mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
1089 vlan->pkt_reformat_pop = NULL;
1093 mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1095 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1096 struct mlx5_modify_hdr *pkt_mod_hdr;
1098 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1099 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
1100 MLX5_SET(set_action_in, action, offset, 8);
1101 MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
1102 MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
1104 pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
1105 if (IS_ERR(pkt_mod_hdr))
1106 return PTR_ERR(pkt_mod_hdr);
1108 vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
1113 mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1115 mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
1116 vlan->pkt_mod_hdr_push_mark = NULL;
1120 mlx5_esw_bridge_vlan_push_pop_fhs_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
1121 struct mlx5_esw_bridge_vlan *vlan)
1123 return mlx5_esw_bridge_vlan_mcast_init(vlan_proto, port, vlan);
/* Tear down the per-VLAN multicast flow handles. */
static void
mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
}
1133 mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_port *port,
1134 struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1138 if (flags & BRIDGE_VLAN_INFO_PVID) {
1139 err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
1143 err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
1145 goto err_vlan_push_mark;
1148 if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
1149 err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
1153 err = mlx5_esw_bridge_vlan_push_pop_fhs_create(vlan_proto, port, vlan);
1155 goto err_vlan_pop_fhs;
1161 mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
1163 if (vlan->pkt_mod_hdr_push_mark)
1164 mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
1166 if (vlan->pkt_reformat_push)
1167 mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
1171 static struct mlx5_esw_bridge_vlan *
1172 mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
1173 struct mlx5_eswitch *esw)
1175 struct mlx5_esw_bridge_vlan *vlan;
1178 vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
1180 return ERR_PTR(-ENOMEM);
1183 vlan->flags = flags;
1184 INIT_LIST_HEAD(&vlan->fdb_list);
1186 err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, port, vlan, esw);
1188 goto err_vlan_push_pop;
1190 err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
1194 trace_mlx5_esw_bridge_vlan_create(vlan);
1198 if (vlan->mcast_handle)
1199 mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
1200 if (vlan->pkt_reformat_pop)
1201 mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
1202 if (vlan->pkt_mod_hdr_push_mark)
1203 mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
1204 if (vlan->pkt_reformat_push)
1205 mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
1208 return ERR_PTR(err);
1211 static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
1212 struct mlx5_esw_bridge_vlan *vlan)
1214 xa_erase(&port->vlans, vlan->vid);
1217 static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_port *port,
1218 struct mlx5_esw_bridge_vlan *vlan,
1219 struct mlx5_esw_bridge *bridge)
1221 struct mlx5_eswitch *esw = bridge->br_offloads->esw;
1222 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1224 list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
1225 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1226 mlx5_esw_bridge_port_mdb_vlan_flush(port, vlan);
1228 if (vlan->mcast_handle)
1229 mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
1230 if (vlan->pkt_reformat_pop)
1231 mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
1232 if (vlan->pkt_mod_hdr_push_mark)
1233 mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
1234 if (vlan->pkt_reformat_push)
1235 mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
/* Completely destroy a VLAN object: flush its state, unregister it from the
 * port and free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
1248 static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
1249 struct mlx5_esw_bridge *bridge)
1251 struct mlx5_esw_bridge_vlan *vlan;
1252 unsigned long index;
1254 xa_for_each(&port->vlans, index, vlan)
1255 mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
1258 static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
1259 struct mlx5_esw_bridge *bridge)
1261 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1262 struct mlx5_esw_bridge_vlan *vlan;
1266 xa_for_each(&port->vlans, i, vlan) {
1267 mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
1268 err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, port,
1269 vlan, br_offloads->esw);
1271 esw_warn(br_offloads->esw->dev,
1272 "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
1273 vlan->vid, bridge->vlan_proto, port->vport_num,
1283 mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
1285 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1286 struct mlx5_esw_bridge_port *port;
1290 xa_for_each(&br_offloads->ports, i, port) {
1291 if (port->bridge != bridge)
1294 err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
1302 static struct mlx5_esw_bridge_vlan *
1303 mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
1304 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
1306 struct mlx5_esw_bridge_port *port;
1307 struct mlx5_esw_bridge_vlan *vlan;
1309 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
1311 /* FDB is added asynchronously on wq while port might have been deleted
1312 * concurrently. Report on 'info' logging level and skip the FDB offload.
1314 esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
1315 return ERR_PTR(-EINVAL);
1318 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1320 /* FDB is added asynchronously on wq while vlan might have been deleted
1321 * concurrently. Report on 'info' logging level and skip the FDB offload.
1323 esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
1325 return ERR_PTR(-EINVAL);
1331 static struct mlx5_esw_bridge_fdb_entry *
1332 mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
1333 const unsigned char *addr, u16 vid)
1335 struct mlx5_esw_bridge_fdb_key key = {};
1337 ether_addr_copy(key.addr, addr);
1339 return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
1342 static struct mlx5_esw_bridge_fdb_entry *
1343 mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1344 const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
1345 struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
1347 struct mlx5_esw_bridge_vlan *vlan = NULL;
1348 struct mlx5_esw_bridge_fdb_entry *entry;
1349 struct mlx5_flow_handle *handle;
1350 struct mlx5_fc *counter;
1353 if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
1354 vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
1357 return ERR_CAST(vlan);
1360 entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
1362 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1364 entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
1366 return ERR_PTR(-ENOMEM);
1368 ether_addr_copy(entry->key.addr, addr);
1369 entry->key.vid = vid;
1371 entry->vport_num = vport_num;
1372 entry->esw_owner_vhca_id = esw_owner_vhca_id;
1373 entry->lastuse = jiffies;
1375 entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
1377 entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;
1379 counter = mlx5_fc_create(esw->dev, true);
1380 if (IS_ERR(counter)) {
1381 err = PTR_ERR(counter);
1382 goto err_ingress_fc_create;
1384 entry->ingress_counter = counter;
1387 mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
1388 addr, vlan, mlx5_fc_id(counter),
1390 mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
1391 mlx5_fc_id(counter), bridge);
1392 if (IS_ERR(handle)) {
1393 err = PTR_ERR(handle);
1394 esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
1395 vport_num, err, peer);
1396 goto err_ingress_flow_create;
1398 entry->ingress_handle = handle;
1400 if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
1401 handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
1402 if (IS_ERR(handle)) {
1403 err = PTR_ERR(handle);
1404 esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
1406 goto err_ingress_filter_flow_create;
1408 entry->filter_handle = handle;
1411 handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
1413 if (IS_ERR(handle)) {
1414 err = PTR_ERR(handle);
1415 esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
1417 goto err_egress_flow_create;
1419 entry->egress_handle = handle;
1421 err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
1423 esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
1428 list_add(&entry->vlan_list, &vlan->fdb_list);
1430 INIT_LIST_HEAD(&entry->vlan_list);
1431 list_add(&entry->list, &bridge->fdb_list);
1433 trace_mlx5_esw_bridge_fdb_entry_init(entry);
1437 mlx5_del_flow_rules(entry->egress_handle);
1438 err_egress_flow_create:
1439 if (entry->filter_handle)
1440 mlx5_del_flow_rules(entry->filter_handle);
1441 err_ingress_filter_flow_create:
1442 mlx5_del_flow_rules(entry->ingress_handle);
1443 err_ingress_flow_create:
1444 mlx5_fc_destroy(esw->dev, entry->ingress_counter);
1445 err_ingress_fc_create:
1447 return ERR_PTR(err);
1450 int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
1451 struct mlx5_esw_bridge_offloads *br_offloads)
1453 struct mlx5_esw_bridge *bridge;
1455 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1459 bridge->ageing_time = clock_t_to_jiffies(ageing_time);
1463 int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1464 struct mlx5_esw_bridge_offloads *br_offloads)
1466 struct mlx5_esw_bridge *bridge;
1469 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1473 filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1474 if (filtering == enable)
1477 mlx5_esw_bridge_fdb_flush(bridge);
1478 mlx5_esw_bridge_mdb_flush(bridge);
1480 bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1482 bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1487 int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
1488 struct mlx5_esw_bridge_offloads *br_offloads)
1490 struct mlx5_esw_bridge *bridge;
1492 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id,
1497 if (bridge->vlan_proto == proto)
1499 if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
1500 esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
1504 mlx5_esw_bridge_fdb_flush(bridge);
1505 mlx5_esw_bridge_mdb_flush(bridge);
1506 bridge->vlan_proto = proto;
1507 mlx5_esw_bridge_vlans_recreate(bridge);
1512 int mlx5_esw_bridge_mcast_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1513 struct mlx5_esw_bridge_offloads *br_offloads)
1515 struct mlx5_eswitch *esw = br_offloads->esw;
1516 struct mlx5_esw_bridge *bridge;
1520 if (!(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table) ||
1521 MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table_limit_regc)) ||
1522 !MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_uplink_hairpin) ||
1523 !MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
1526 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1530 mcast = bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG;
1531 if (mcast == enable)
1535 err = mlx5_esw_bridge_mcast_enable(bridge);
1537 mlx5_esw_bridge_mcast_disable(bridge);
1542 static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
1543 struct mlx5_esw_bridge_offloads *br_offloads,
1544 struct mlx5_esw_bridge *bridge)
1546 struct mlx5_eswitch *esw = br_offloads->esw;
1547 struct mlx5_esw_bridge_port *port;
1550 port = kvzalloc(sizeof(*port), GFP_KERNEL);
1554 port->vport_num = vport_num;
1555 port->esw_owner_vhca_id = esw_owner_vhca_id;
1556 port->bridge = bridge;
1557 port->flags |= flags;
1558 xa_init(&port->vlans);
1560 err = mlx5_esw_bridge_port_mcast_init(port);
1563 "Failed to initialize port multicast (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
1564 port->vport_num, port->esw_owner_vhca_id, err);
1565 goto err_port_mcast;
1568 err = mlx5_esw_bridge_port_insert(port, br_offloads);
1571 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
1572 port->vport_num, port->esw_owner_vhca_id, err);
1573 goto err_port_insert;
1575 trace_mlx5_esw_bridge_vport_init(port);
1580 mlx5_esw_bridge_port_mcast_cleanup(port);
1586 static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
1587 struct mlx5_esw_bridge_port *port)
1589 u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
1590 struct mlx5_esw_bridge *bridge = port->bridge;
1591 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1593 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
1594 if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
1595 mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
1597 trace_mlx5_esw_bridge_vport_cleanup(port);
1598 mlx5_esw_bridge_port_vlans_flush(port, bridge);
1599 mlx5_esw_bridge_port_mcast_cleanup(port);
1600 mlx5_esw_bridge_port_erase(port, br_offloads);
1602 mlx5_esw_bridge_put(br_offloads, bridge);
1606 static int mlx5_esw_bridge_vport_link_with_flags(struct net_device *br_netdev, u16 vport_num,
1607 u16 esw_owner_vhca_id, u16 flags,
1608 struct mlx5_esw_bridge_offloads *br_offloads,
1609 struct netlink_ext_ack *extack)
1611 struct mlx5_esw_bridge *bridge;
1614 bridge = mlx5_esw_bridge_lookup(br_netdev, br_offloads);
1615 if (IS_ERR(bridge)) {
1616 NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
1617 return PTR_ERR(bridge);
1620 err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
1622 NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
1628 mlx5_esw_bridge_put(br_offloads, bridge);
1632 int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
1633 struct mlx5_esw_bridge_offloads *br_offloads,
1634 struct netlink_ext_ack *extack)
1636 return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id, 0,
1637 br_offloads, extack);
1640 int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num,
1641 u16 esw_owner_vhca_id,
1642 struct mlx5_esw_bridge_offloads *br_offloads,
1643 struct netlink_ext_ack *extack)
1645 struct mlx5_esw_bridge_port *port;
1648 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1650 NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
1653 if (port->bridge->ifindex != br_netdev->ifindex) {
1654 NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
1658 err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1660 NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
1664 int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
1665 u16 esw_owner_vhca_id,
1666 struct mlx5_esw_bridge_offloads *br_offloads,
1667 struct netlink_ext_ack *extack)
1669 if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
1672 return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id,
1673 MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
1674 br_offloads, extack);
1677 int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
1678 u16 esw_owner_vhca_id,
1679 struct mlx5_esw_bridge_offloads *br_offloads,
1680 struct netlink_ext_ack *extack)
1682 return mlx5_esw_bridge_vport_unlink(br_netdev, vport_num, esw_owner_vhca_id, br_offloads,
1686 int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
1687 struct mlx5_esw_bridge_offloads *br_offloads,
1688 struct netlink_ext_ack *extack)
1690 struct mlx5_esw_bridge_port *port;
1691 struct mlx5_esw_bridge_vlan *vlan;
1693 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1697 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1699 if (vlan->flags == flags)
1701 mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
1704 vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
1707 NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
1708 return PTR_ERR(vlan);
1713 void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
1714 struct mlx5_esw_bridge_offloads *br_offloads)
1716 struct mlx5_esw_bridge_port *port;
1717 struct mlx5_esw_bridge_vlan *vlan;
1719 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1723 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1726 mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
1729 void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1730 struct mlx5_esw_bridge_offloads *br_offloads,
1731 struct switchdev_notifier_fdb_info *fdb_info)
1733 struct mlx5_esw_bridge_fdb_entry *entry;
1734 struct mlx5_esw_bridge *bridge;
1736 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1740 entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
1742 esw_debug(br_offloads->esw->dev,
1743 "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
1744 fdb_info->addr, fdb_info->vid, vport_num);
1748 entry->lastuse = jiffies;
1751 void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1752 struct mlx5_esw_bridge_offloads *br_offloads,
1753 struct switchdev_notifier_fdb_info *fdb_info)
1755 struct mlx5_esw_bridge_fdb_entry *entry;
1756 struct mlx5_esw_bridge *bridge;
1758 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1762 entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
1764 esw_debug(br_offloads->esw->dev,
1765 "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
1766 fdb_info->addr, fdb_info->vid, vport_num);
1770 entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
1773 void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1774 struct mlx5_esw_bridge_offloads *br_offloads,
1775 struct switchdev_notifier_fdb_info *fdb_info)
1777 struct mlx5_esw_bridge_fdb_entry *entry;
1778 struct mlx5_esw_bridge_port *port;
1779 struct mlx5_esw_bridge *bridge;
1781 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1785 bridge = port->bridge;
1786 entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
1787 fdb_info->vid, fdb_info->added_by_user,
1788 port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
1789 br_offloads->esw, bridge);
1793 if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
1794 mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
1795 SWITCHDEV_FDB_OFFLOADED);
1796 else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
1797 /* Take over dynamic entries to prevent kernel bridge from aging them out. */
1798 mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
1799 SWITCHDEV_FDB_ADD_TO_BRIDGE);
1802 void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1803 struct mlx5_esw_bridge_offloads *br_offloads,
1804 struct switchdev_notifier_fdb_info *fdb_info)
1806 struct mlx5_eswitch *esw = br_offloads->esw;
1807 struct mlx5_esw_bridge_fdb_entry *entry;
1808 struct mlx5_esw_bridge *bridge;
1810 bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1814 entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
1817 "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
1818 fdb_info->addr, fdb_info->vid, vport_num);
1822 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1825 void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
1827 struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1828 struct mlx5_esw_bridge *bridge;
1830 list_for_each_entry(bridge, &br_offloads->bridges, list) {
1831 list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
1832 unsigned long lastuse =
1833 (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
1835 if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
1836 MLX5_ESW_BRIDGE_FLAG_DELETED))
1839 if (time_after(lastuse, entry->lastuse))
1840 mlx5_esw_bridge_fdb_entry_refresh(entry);
1841 else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
1842 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
1843 mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1848 int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1849 const unsigned char *addr, u16 vid,
1850 struct mlx5_esw_bridge_offloads *br_offloads,
1851 struct netlink_ext_ack *extack)
1853 struct mlx5_esw_bridge_vlan *vlan;
1854 struct mlx5_esw_bridge_port *port;
1855 struct mlx5_esw_bridge *bridge;
1858 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1860 esw_warn(br_offloads->esw->dev,
1861 "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
1863 NL_SET_ERR_MSG_FMT_MOD(extack,
1864 "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
1869 bridge = port->bridge;
1870 if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
1871 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1873 esw_warn(br_offloads->esw->dev,
1874 "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
1875 addr, vid, vport_num);
1876 NL_SET_ERR_MSG_FMT_MOD(extack,
1877 "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
1878 addr, vid, vport_num);
1883 err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
1885 NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
1886 addr, vid, vport_num);
1893 void mlx5_esw_bridge_port_mdb_del(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1894 const unsigned char *addr, u16 vid,
1895 struct mlx5_esw_bridge_offloads *br_offloads)
1897 struct mlx5_esw_bridge_port *port;
1899 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1903 mlx5_esw_bridge_port_mdb_detach(dev, port, addr, vid);
1906 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1908 struct mlx5_esw_bridge_port *port;
1911 xa_for_each(&br_offloads->ports, i, port)
1912 mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1914 WARN_ONCE(!list_empty(&br_offloads->bridges),
1915 "Cleaning up bridge offloads while still having bridges attached\n");
1918 struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1920 struct mlx5_esw_bridge_offloads *br_offloads;
1924 br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1926 return ERR_PTR(-ENOMEM);
1928 INIT_LIST_HEAD(&br_offloads->bridges);
1929 xa_init(&br_offloads->ports);
1930 br_offloads->esw = esw;
1931 esw->br_offloads = br_offloads;
1932 mlx5_esw_bridge_debugfs_offloads_init(br_offloads);
1937 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1939 struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1946 mlx5_esw_bridge_flush(br_offloads);
1947 WARN_ON(!xa_empty(&br_offloads->ports));
1948 mlx5_esw_bridge_debugfs_offloads_cleanup(br_offloads);
1950 esw->br_offloads = NULL;
1951 kvfree(br_offloads);