/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
#include "mpesw.h"

enum {
	MLX5_LAG_EGRESS_PORT_1 = 1,
	MLX5_LAG_EGRESS_PORT_2,
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_SPINLOCK(lag_lock);

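/* Resolve the firmware port selection mode for a given LAG mode: hash-based
 * LAG uses the port selection flow table, multi-port E-Switch (MPESW) uses
 * its dedicated mode, and everything else falls back to queue affinity.
 */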
static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
		return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT;

	if (mode == MLX5_LAG_MODE_MPESW)
		return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW;

	return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
}

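/* Illustrative note (not in the original source): the active-port mask built
 * below is a bitmap with one bit per enabled port index, e.g. with ports 0
 * and 1 both able to transmit it evaluates to BIT_MASK(0) | BIT_MASK(1).
 */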
static u8 lag_active_port_bits(struct mlx5_lag *ldev)
{
	u8 enabled_ports[MLX5_MAX_PORTS] = {};
	u8 active_port = 0;
	int num_enabled;
	int idx;

	mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
			      &num_enabled);
	for (idx = 0; idx < num_enabled; idx++)
		active_port |= BIT_MASK(enabled_ports[idx]);

	return active_port;
}

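/* Build and execute the CREATE_LAG command: queue-affinity mode programs the
 * per-port tx_remap_affinity fields, while hash (port selection FT) mode
 * programs the active-port bitmap when the flow-table-bypass capability is
 * present.
 */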
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
			       unsigned long flags)
{
	bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
				     &flags);
	int port_sel_mode = get_port_sel_mode(mode, flags);
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx;

	lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);

	switch (port_sel_mode) {
	case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
		break;
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
		if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
			break;

		MLX5_SET(lagc, lag_ctx, active_port,
			 lag_active_port_bits(mlx5_lag_dev(dev)));
		break;
	default:
		break;
	}
	MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);

	return mlx5_cmd_exec_in(dev, create_lag, in);
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
			       u8 *ports)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

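/* The two helpers below scan the bond tracker state and collect the indices
 * of ports that cannot transmit (tx disabled or link down) or that can. If
 * no port is usable, mlx5_infer_tx_enabled() falls back to the disabled set
 * so callers always get a non-empty list.
 */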
static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
				   u8 *ports, int *num_disabled)
{
	int i;

	*num_disabled = 0;
	for (i = 0; i < num_ports; i++) {
		if (!tracker->netdev_state[i].tx_enabled ||
		    !tracker->netdev_state[i].link_up)
			ports[(*num_disabled)++] = i;
	}
}

void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
			   u8 *ports, int *num_enabled)
{
	int i;

	*num_enabled = 0;
	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			ports[(*num_enabled)++] = i;
	}

	if (*num_enabled == 0)
		mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
}

static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
				   struct mlx5_lag *ldev,
				   struct lag_tracker *tracker,
				   unsigned long flags)
{
	char buf[MLX5_MAX_PORTS * 10 + 1] = {};
	u8 enabled_ports[MLX5_MAX_PORTS] = {};
	int written = 0;
	int num_enabled;
	int idx;
	int err;
	int i;
	int j;

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
		mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
				      &num_enabled);
		for (i = 0; i < num_enabled; i++) {
			err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
			if (err != 3)
				return;
			written += err;
		}
		buf[written - 2] = 0;
		mlx5_core_info(dev, "lag map active ports: %s\n", buf);
	} else {
		for (i = 0; i < ldev->ports; i++) {
			for (j = 0; j < ldev->buckets; j++) {
				idx = i * ldev->buckets + j;
				err = scnprintf(buf + written, 10,
						" port %d:%d", i + 1, ldev->v2p_map[idx]);
				if (err != 9)
					return;
				written += err;
			}
		}
		mlx5_core_info(dev, "lag map:%s\n", buf);
	}
}

static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);

static void mlx5_ldev_free(struct kref *ref)
{
	struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);

	if (ldev->nb.notifier_call)
		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
	mlx5_lag_mp_cleanup(ldev);
	cancel_delayed_work_sync(&ldev->bond_work);
	destroy_workqueue(ldev->wq);
	mutex_destroy(&ldev->lock);
	kfree(ldev);
}

static void mlx5_ldev_put(struct mlx5_lag *ldev)
{
	kref_put(&ldev->ref, mlx5_ldev_free);
}

static void mlx5_ldev_get(struct mlx5_lag *ldev)
{
	kref_get(&ldev->ref);
}

static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int err;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	kref_init(&ldev->ref);
	mutex_init(&ldev->lock);
	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	ldev->nb.notifier_call = mlx5_lag_netdev_event;
	if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
		ldev->nb.notifier_call = NULL;
		mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
	}
	ldev->mode = MLX5_LAG_MODE_NONE;

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);

	ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
	ldev->buckets = 1;

	return ldev;
}

int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -ENOENT;
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return ldev->mode == MLX5_LAG_MODE_ROCE;
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return ldev->mode == MLX5_LAG_MODE_SRIOV;
}

/* Create a mapping between steering slots and active ports.
 * As we have ldev->buckets slots per port first assume the native
 * mapping should be used.
 * If there are ports that are disabled fill the relevant slots
 * with mapping that points to active ports.
 */
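/* Worked example (illustrative numbers, not from the original source): with
 * num_ports = 2 and buckets = 2, the native map is { 1, 1, 2, 2 }. If port 2
 * is down, each of its buckets is remapped to a randomly chosen enabled
 * port, e.g. { 1, 1, 1, 1 }.
 */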
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 num_ports,
					   u8 buckets,
					   u8 *ports)
{
	int disabled[MLX5_MAX_PORTS] = {};
	int enabled[MLX5_MAX_PORTS] = {};
	int disabled_ports_num = 0;
	int enabled_ports_num = 0;
	int idx;
	u32 rand;
	int i;
	int j;

	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			enabled[enabled_ports_num++] = i;
		else
			disabled[disabled_ports_num++] = i;
	}

	/* Use native mapping by default where each port's buckets
	 * point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
	 */
	for (i = 0; i < num_ports; i++)
		for (j = 0; j < buckets; j++) {
			idx = i * buckets + j;
			ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
		}

	/* If all ports are disabled/enabled keep native mapping */
	if (enabled_ports_num == num_ports ||
	    disabled_ports_num == num_ports)
		return;

	/* Go over the disabled ports and for each assign a random active port */
	for (i = 0; i < disabled_ports_num; i++) {
		for (j = 0; j < buckets; j++) {
			get_random_bytes(&rand, 4);
			ports[disabled[i] * buckets + j] = enabled[rand % enabled_ports_num] + 1;
		}
	}
}

static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].has_drop)
			return true;
	return false;
}

static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].has_drop)
			continue;

		mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
							     MLX5_VPORT_UPLINK);
		ldev->pf[i].has_drop = false;
	}
}

static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
				     struct lag_tracker *tracker)
{
	u8 disabled_ports[MLX5_MAX_PORTS] = {};
	struct mlx5_core_dev *dev;
	int disabled_index;
	int num_disabled;
	int err;
	int i;

	/* First delete the current drop rule so there won't be any dropped
	 * packets
	 */
	mlx5_lag_drop_rule_cleanup(ldev);

	if (!ldev->tracker.has_inactive)
		return;

	mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);

	for (i = 0; i < num_disabled; i++) {
		disabled_index = disabled_ports[i];
		dev = ldev->pf[disabled_index].dev;
		err = mlx5_esw_acl_ingress_vport_drop_rule_create(dev->priv.eswitch,
								  MLX5_VPORT_UPLINK);
		if (!err)
			ldev->pf[disabled_index].has_drop = true;
		else
			mlx5_core_err(dev,
				      "Failed to create lag drop rule, error: %d", err);
	}
}

static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx;

	lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x2);

	MLX5_SET(lagc, lag_ctx, active_port, ports);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}

static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u8 active_ports;
	int ret;

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
		ret = mlx5_lag_port_sel_modify(ldev, ports);
		if (ret ||
		    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass))
			return ret;

		active_ports = lag_active_port_bits(ldev);

		return mlx5_cmd_modify_active_port(dev0, active_ports);
	}
	return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}

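/* Recompute the port affinity map from the current bond state and push it to
 * firmware only if at least one slot changed; active-backup bonds also get
 * their ingress drop rules refreshed afterwards.
 */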
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int idx;
	int err;
	int i;
	int j;

	mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			if (ports[idx] == ldev->v2p_map[idx])
				continue;
			err = _mlx5_modify_lag(ldev, ports);
			if (err) {
				mlx5_core_err(dev0,
					      "Failed to modify LAG (%d)\n",
					      err);
				return;
			}
			memcpy(ldev->v2p_map, ports, sizeof(ports));

			mlx5_lag_print_mapping(dev0, ldev, tracker,
					       ldev->mode_flags);
			break;
		}
	}

	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !(ldev->mode == MLX5_LAG_MODE_ROCE))
		mlx5_lag_drop_rule_setup(ldev, tracker);
}

static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
					   unsigned long *flags)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;

	if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
		if (ldev->ports > 2)
			return -EINVAL;
		return 0;
	}

	if (ldev->ports > 2)
		ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;

	set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);

	return 0;
}

static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
						struct lag_tracker *tracker,
						enum mlx5_lag_mode mode,
						unsigned long *flags)
{
	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];

	if (mode == MLX5_LAG_MODE_MPESW)
		return;

	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
}

static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
			      struct lag_tracker *tracker, bool shared_fdb,
			      unsigned long *flags)
{
	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;

	*flags = 0;
	if (shared_fdb) {
		set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
	}

	if (mode == MLX5_LAG_MODE_MPESW)
		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);

	if (roce_lag)
		return mlx5_lag_set_port_sel_mode_roce(ldev, flags);

	mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
	return 0;
}

char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
	int port_sel_mode = get_port_sel_mode(mode, flags);

	switch (port_sel_mode) {
	case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: return "hash";
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW: return "mpesw";
	default: return "invalid";
	}
}

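/* In shared-FDB (single FDB) mode every secondary port's eswitch is attached
 * to the first port's eswitch so one FDB serves the whole bond; on failure
 * the already-attached eswitches are detached in reverse order.
 */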
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	int err;
	int i;

	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;

		err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
							       slave_esw, ldev->ports);
		if (err)
			goto err;
	}
	return 0;
err:
	for (; i > MLX5_LAG_P1; i--)
		mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
							 ldev->pf[i].dev->priv.eswitch);
	return err;
}

static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   enum mlx5_lag_mode mode,
			   unsigned long flags)
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;

	if (tracker)
		mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
	mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
		       shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
	if (err) {
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
		return err;
	}

	if (shared_fdb) {
		err = mlx5_lag_create_single_fdb(ldev);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
			mlx5_core_info(dev0, "Operation mode is single FDB\n");
	}

	if (err) {
		MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
		if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
	}

	return err;
}

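/* Activation sequence: translate the requested mode into mode flags, build
 * the initial v2p map (and the port selection flow table for hash mode),
 * then issue CREATE_LAG; any failure rolls back the port selection objects.
 */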
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb)
{
	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	unsigned long flags = 0;
	int err;

	err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
	if (err)
		return err;

	if (mode != MLX5_LAG_MODE_MPESW) {
		mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
			err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
						       ldev->v2p_map);
			if (err) {
				mlx5_core_err(dev0,
					      "Failed to create LAG port selection(%d)\n",
					      err);
				return err;
			}
		}
	}

	err = mlx5_create_lag(ldev, tracker, mode, flags);
	if (err) {
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
			mlx5_lag_port_sel_destroy(ldev);
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !roce_lag)
		mlx5_lag_drop_rule_setup(ldev, tracker);

	ldev->mode = mode;
	ldev->mode_flags = flags;
	return 0;
}

int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	unsigned long flags = ldev->mode_flags;
	int err;
	int i;

	ldev->mode = MLX5_LAG_MODE_NONE;
	ldev->mode_flags = 0;
	mlx5_lag_mp_reset(ldev);

	if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
		for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
			mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
								 ldev->pf[i].dev->priv.eswitch);
		clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	}

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
		return err;
	}

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
		mlx5_lag_port_sel_destroy(ldev);
	if (mlx5_lag_has_drop_rule(ldev))
		mlx5_lag_drop_rule_cleanup(ldev);

	return 0;
}

#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_core_dev *dev;
	u8 mode;
#endif
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (!ldev->pf[i].dev)
			return false;

#ifdef CONFIG_MLX5_ESWITCH
	for (i = 0; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
			return false;
	}

	dev = ldev->pf[MLX5_LAG_P1].dev;
	mode = mlx5_eswitch_mode(dev);
	for (i = 0; i < ldev->ports; i++)
		if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
			return false;

	if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
		return false;
#else
	for (i = 0; i < ldev->ports; i++)
		if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
			return false;
#endif
	return true;
}

void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

void mlx5_disable_lag(struct mlx5_lag *ldev)
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	bool roce_lag;
	int err;
	int i;

	roce_lag = __mlx5_lag_is_roce(ldev);

	if (shared_fdb) {
		mlx5_lag_remove_devices(ldev);
	} else if (roce_lag) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
		}
		for (i = 1; i < ldev->ports; i++)
			mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
	}

	err = mlx5_deactivate_lag(ldev);
	if (err)
		return;

	if (shared_fdb || roce_lag)
		mlx5_lag_add_devices(ldev);

	if (shared_fdb) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
		if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
	}
}

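/* Shared FDB is only offered when both devices are in switchdev mode with
 * vport match metadata enabled, the devcom eswitch-offloads pairing is
 * ready, and the firmware exposes the native FDB selection / shared ingress
 * ACL capabilities checked below.
 */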
bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;

	if (is_mdev_switchdev_mode(dev0) &&
	    is_mdev_switchdev_mode(dev1) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
	    mlx5_devcom_comp_is_ready(dev0->priv.devcom,
				      MLX5_DEVCOM_ESW_OFFLOADS) &&
	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
		return true;

	return false;
}

static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
{
	bool roce_lag = true;
	int i;

	for (i = 0; i < ldev->ports; i++)
		roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);

#ifdef CONFIG_MLX5_ESWITCH
	for (i = 0; i < ldev->ports; i++)
		roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif

	return roce_lag;
}

static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
{
	return do_bond && __mlx5_lag_is_active(ldev) &&
	       ldev->mode != MLX5_LAG_MODE_MPESW;
}

static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
{
	return !do_bond && __mlx5_lag_is_active(ldev) &&
	       ldev->mode != MLX5_LAG_MODE_MPESW;
}

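/* Central decision point, called from the delayed bond work: read the
 * tracker, decide whether the bond should be offloaded, and then activate,
 * modify or disable the LAG accordingly. RoCE LAG additionally re-enables
 * RoCE on the secondary ports, while shared FDB reloads the eswitch
 * representors.
 */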
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	struct lag_tracker tracker = { };
	bool do_bond, roce_lag;
	int err;
	int i;

	if (!mlx5_lag_is_ready(ldev)) {
		do_bond = false;
	} else {
		/* VF LAG is in multipath mode, ignore bond change requests */
		if (mlx5_lag_is_multipath(dev0))
			return;

		tracker = ldev->tracker;

		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
	}

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		bool shared_fdb = mlx5_shared_fdb_supported(ldev);

		roce_lag = mlx5_lag_is_roce_lag(ldev);

		if (shared_fdb || roce_lag)
			mlx5_lag_remove_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_MODE_ROCE :
						   MLX5_LAG_MODE_SRIOV,
					shared_fdb);
		if (err) {
			if (shared_fdb || roce_lag)
				mlx5_lag_add_devices(ldev);

			return;
		} else if (roce_lag) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
			for (i = 1; i < ldev->ports; i++)
				mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
		} else if (shared_fdb) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);

			err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
			if (!err)
				err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);

			if (err) {
				dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
				mlx5_rescan_drivers_locked(dev0);
				mlx5_deactivate_lag(ldev);
				mlx5_lag_add_devices(ldev);
				mlx5_eswitch_reload_reps(dev0->priv.eswitch);
				mlx5_eswitch_reload_reps(dev1->priv.eswitch);
				mlx5_core_err(dev0, "Failed to enable lag\n");
				return;
			}
		}
	} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
		mlx5_disable_lag(ldev);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		mlx5_dev_list_unlock();
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mutex_unlock(&ldev->lock);
	mlx5_dev_list_unlock();
}

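/* CHANGEUPPER handler: walk the bond's slaves, record which of our netdevs
 * are enslaved in a bitmap, and mark the tracker as bonded only when all of
 * this device's ports (and nothing else) sit under the same bond in
 * active-backup or hash mode.
 */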
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded, is_in_lag, mode_supported;
	bool has_inactive = 0;
	struct slave *slave;
	u8 bond_status = 0;
	int num_slaves = 0;
	int changed = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx >= 0) {
			slave = bond_slave_get_rcu(ndev_tmp);
			if (slave)
				has_inactive |= bond_is_slave_inactive(slave);
			bond_status |= (1 << idx);
		}

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & GENMASK(ldev->ports - 1, 0)))
		return 0;

	if (lag_upper_info) {
		tracker->tx_type = lag_upper_info->tx_type;
		tracker->hash_type = lag_upper_info->hash_type;
	}

	tracker->has_inactive = has_inactive;
	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 */
	is_in_lag = num_slaves == ldev->ports &&
		    bond_status == GENMASK(ldev->ports - 1, 0);

	/* Lag mode must be activebackup or hash. */
	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

	is_bonded = is_in_lag && mode_supported;
	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		changed = 1;
	}

	if (!is_in_lag)
		return changed;

	if (!mlx5_lag_is_ready(ldev))
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
	else if (!mode_supported)
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, TX type isn't supported");

	return changed;
}

static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx < 0)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
					    struct lag_tracker *tracker,
					    struct net_device *ndev)
{
	struct net_device *ndev_tmp;
	struct slave *slave;
	bool has_inactive = 0;
	int idx;

	if (!netif_is_lag_master(ndev))
		return 0;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx < 0)
			continue;

		slave = bond_slave_get_rcu(ndev_tmp);
		if (slave)
			has_inactive |= bond_is_slave_inactive(slave);
	}
	rcu_read_unlock();

	if (tracker->has_inactive == has_inactive)
		return 0;

	tracker->has_inactive = has_inactive;

	return 1;
}

/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (event != NETDEV_CHANGEUPPER &&
	    event != NETDEV_CHANGELOWERSTATE &&
	    event != NETDEV_CHANGEINFODATA)
		return NOTIFY_DONE;

	ldev = container_of(this, struct mlx5_lag, nb);

	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	case NETDEV_CHANGEINFODATA:
		changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
		break;
	}

	ldev->tracker = tracker;

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
				 struct mlx5_core_dev *dev,
				 struct net_device *netdev)
{
	unsigned int fn = mlx5_get_dev_index(dev);
	unsigned long flags;

	if (fn >= ldev->ports)
		return;

	spin_lock_irqsave(&lag_lock, flags);
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;
	spin_unlock_irqrestore(&lag_lock, flags);
}

static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
				    struct net_device *netdev)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	for (i = 0; i < ldev->ports; i++) {
		if (ldev->pf[i].netdev == netdev) {
			ldev->pf[i].netdev = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&lag_lock, flags);
}

static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
			       struct mlx5_core_dev *dev)
{
	unsigned int fn = mlx5_get_dev_index(dev);

	if (fn >= ldev->ports)
		return;

	ldev->pf[fn].dev = dev;
	dev->priv.lag = ldev;
}

static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
				  struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == ldev->ports)
		return;

	ldev->pf[i].dev = NULL;
	dev->priv.lag = NULL;
}

/* Must be called with intf_mutex held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	tmp_dev = mlx5_get_next_phys_dev_lag(dev);
	if (tmp_dev)
		ldev = mlx5_lag_dev(tmp_dev);

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc(dev);
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return 0;
		}
		mlx5_ldev_add_mdev(ldev, dev);
		return 0;
	}

	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		return -EAGAIN;
	}
	mlx5_ldev_get(ldev);
	mlx5_ldev_add_mdev(ldev, dev);
	mutex_unlock(&ldev->lock);

	return 0;
}

void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	/* mdev is being removed, might as well remove debugfs
	 * as early as possible.
	 */
	mlx5_ldev_remove_debugfs(dev->priv.dbg.lag_debugfs);
recheck:
	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_remove_mdev(ldev, dev);
	mutex_unlock(&ldev->lock);
	mlx5_ldev_put(ldev);
}

void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS ||
	     MLX5_CAP_GEN(dev, num_lag_ports) <= 1))
		return;

recheck:
	mlx5_dev_list_lock();
	err = __mlx5_lag_dev_add_mdev(dev);
	mlx5_dev_list_unlock();

	if (err) {
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_add_debugfs(dev);
}

void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
			    struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	bool lag_is_active;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	mlx5_ldev_remove_netdev(ldev, netdev);
	clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);

	lag_is_active = __mlx5_lag_is_active(ldev);
	mutex_unlock(&ldev->lock);

	if (lag_is_active)
		mlx5_queue_bond_work(ldev, 0);
}

void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
			 struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	mlx5_ldev_add_netdev(ldev, dev, netdev);

	for (i = 0; i < ldev->ports; i++)
		if (!ldev->pf[i].netdev)
			break;

	if (i >= ldev->ports)
		set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
	mutex_unlock(&ldev->lock);
	mlx5_queue_bond_work(ldev, 0);
}

bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_roce(ldev);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res = 0;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (ldev)
		res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_mode_is_hash);

bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev) &&
	      dev == ldev->pf[MLX5_LAG_P1].dev;
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_master);

bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_sriov(ldev);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);

bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);

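/* mode_changes_in_progress fences off concurrent bond work: while it is
 * non-zero, mlx5_do_bond_work() and mdev add/remove back off and retry, and
 * any active LAG is torn down before the mode change proceeds.
 */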
void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_dev_list_lock();
	mutex_lock(&ldev->lock);

	ldev->mode_changes_in_progress++;
	if (__mlx5_lag_is_active(ldev))
		mlx5_disable_lag(ldev);

	mutex_unlock(&ldev->lock);
	mlx5_dev_list_unlock();
}

void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	ldev->mode_changes_in_progress--;
	mutex_unlock(&ldev->lock);
	mlx5_queue_bond_work(ldev, 0);
}

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		for (i = 0; i < ldev->ports; i++)
			if (ldev->tracker.netdev_state[i].tx_enabled)
				ndev = ldev->pf[i].netdev;
		if (!ndev)
			ndev = ldev->pf[ldev->ports - 1].netdev;
	} else {
		ndev = ldev->pf[MLX5_LAG_P1].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	u8 port = 0;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	for (i = 0; i < ldev->ports; i++) {
		if (ldev->pf[i].netdev == slave) {
			port = i;
			break;
		}
	}

	port = ldev->v2p_map[port * ldev->buckets];

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);
	return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);

u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return 0;

	return ldev->ports;
}
EXPORT_SYMBOL(mlx5_lag_get_num_ports);

struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
{
	struct mlx5_core_dev *peer_dev = NULL;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		goto unlock;

	if (*i == ldev->ports)
		goto unlock;
	for (idx = *i; idx < ldev->ports; idx++)
		if (ldev->pf[idx].dev != dev)
			break;

	if (idx == ldev->ports) {
		*i = idx;
		goto unlock;
	}
	*i = idx + 1;

	peer_dev = ldev->pf[idx].dev;

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);
	return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_next_peer_mdev);

int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev **mdev;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL);
	if (!mdev) {
		ret = -ENOMEM;
		goto free_out;
	}

	memset(values, 0, sizeof(*values) * num_counters);

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (ldev && __mlx5_lag_is_active(ldev)) {
		num_ports = ldev->ports;
		for (i = 0; i < ldev->ports; i++)
			mdev[i] = ldev->pf[i].dev;
	} else {
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock_irqrestore(&lag_lock, flags);

	for (i = 0; i < num_ports; ++i) {
		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

		MLX5_SET(query_cong_statistics_in, in, opcode,
			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
					  out);
		if (ret)
			goto free_mdev;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

free_mdev:
	kvfree(mdev);
free_out:
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);