1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies */
11 #include "sf/dev/dev.h"
/* devlink 'dev flash' callback: burn the firmware image supplied by
 * userspace (@params->fw) onto the device; errors are reported via @extack.
 */
static int mlx5_devlink_flash_update(struct devlink *devlink,
				     struct devlink_flash_update_params *params,
				     struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	return mlx5_firmware_flash(dev, params->fw, extack);
23 static u8 mlx5_fw_ver_major(u32 version)
25 return (version >> 24) & 0xff;
28 static u8 mlx5_fw_ver_minor(u32 version)
30 return (version >> 16) & 0xff;
33 static u16 mlx5_fw_ver_subminor(u32 version)
35 return version & 0xffff;
38 #define DEVLINK_FW_STRING_LEN 32
/* devlink 'dev info' callback: reports the driver name, the board PSID
 * (fixed version) and the running and stored FW versions. When no FW
 * update is pending, the stored version is reported equal to the running
 * one.
 */
mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		      struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	char version_str[DEVLINK_FW_STRING_LEN];
	u32 running_fw, stored_fw;

	err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
	err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
	err = mlx5_fw_version_query(dev, &running_fw, &stored_fw);
	/* Running FW version, formatted major.minor.subminor. */
	snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
		 mlx5_fw_ver_major(running_fw), mlx5_fw_ver_minor(running_fw),
		 mlx5_fw_ver_subminor(running_fw));
	err = devlink_info_version_running_put(req, "fw.version", version_str);
	err = devlink_info_version_running_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
	/* no pending version, return running (stored) version */
	stored_fw = running_fw;
	/* Stored (pending or running) FW version, same format. */
	snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
		 mlx5_fw_ver_major(stored_fw), mlx5_fw_ver_minor(stored_fw),
		 mlx5_fw_ver_subminor(stored_fw));
	err = devlink_info_version_stored_put(req, "fw.version", version_str);
	return devlink_info_version_stored_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
/* Activate the stored FW image via a synchronized FW reset (level 3).
 * Requires the device to support LEVEL3 reset; otherwise a reboot is
 * needed and we fail with an extack message. net_port_alive reflects
 * whether the reset type keeps the network port alive during reset.
 */
static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 reset_level, reset_type, net_port_alive;

	err = mlx5_fw_reset_query(dev, &reset_level, &reset_type);
	if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL3)) {
		NL_SET_ERR_MSG_MOD(extack, "FW activate requires reboot");
	net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
	err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
	/* Block until the FW reset completes (or fails). */
	err = mlx5_fw_reset_wait_reset_done(dev);
	NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
/* Apply the stored FW via live patching (reset level 0, no driver or
 * device reset). Only possible when FW reports LEVEL0 capability for the
 * pending image; otherwise fail with an extack message.
 */
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
					      struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	err = mlx5_fw_reset_query(dev, &reset_level, NULL);
	if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL0)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "FW upgrade to the stored FW can't be done by FW live patching");
	return mlx5_fw_reset_set_live_patch(dev);
/* devlink 'reload' down-phase. DRIVER_REINIT unloads the driver;
 * FW_ACTIVATE triggers FW activation, using live patch when the caller
 * requested the NO_RESET limit. Reload is refused while SF devices are
 * allocated or while the device is in LAG mode.
 */
static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool sf_dev_allocated;

	sf_dev_allocated = mlx5_sf_dev_allocated(dev);
	if (sf_dev_allocated) {
		/* Reload results in deleting SF device which further results in
		 * unregistering devlink instance while holding devlink_mutex.
		 * Hence, do not support reload.
		 */
		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
	if (mlx5_lag_is_active(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		mlx5_unload_one(dev);
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
		return mlx5_devlink_reload_fw_activate(devlink, extack);
	/* Unsupported action should not get to this function */
/* devlink 'reload' up-phase: bring the driver back up and report which
 * reload actions were actually performed via @actions_performed (a bitmap
 * of enum devlink_reload_action).
 */
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
				  enum devlink_reload_limit limit, u32 *actions_performed,
				  struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	*actions_performed = BIT(action);
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		return mlx5_load_one(dev);
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
		/* On fw_activate action, also driver is reloaded and reinit performed */
		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		return mlx5_load_one(dev);
	/* Unsupported action should not get to this function */
/* Linear search of dev->priv.traps for the trap with @trap_id.
 * NOTE(review): caller is expected to hold whatever lock protects the
 * traps list — confirm against callers outside this view.
 */
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
	struct mlx5_devlink_trap *dl_trap;

	list_for_each_entry(dl_trap, &dev->priv.traps, list)
		if (dl_trap->trap.id == trap_id)
/* devlink trap_init callback: allocate a driver-side trap descriptor,
 * default its action to DROP, remember devlink's @trap_ctx for later
 * reports, and link it into dev->priv.traps. Duplicate IDs are rejected.
 */
static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
	dl_trap->trap.id = trap->id;
	dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
	dl_trap->item = trap_ctx;

	if (mlx5_find_trap_by_id(dev, trap->id)) {
		/* Already present: error path (allocation freed on a line not in view). */
		mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
	list_add_tail(&dl_trap->list, &dev->priv.traps);
/* devlink trap_fini callback: unlink and release the driver-side trap
 * descriptor created by mlx5_devlink_trap_init(). A missing ID is only
 * logged — there is nothing else to undo.
 */
static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap->id);
	mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
	list_del(&dl_trap->list);
/* devlink trap_action_set callback: switch a trap between DROP and TRAP.
 * Rejected in switchdev mode. The new action is committed first and then
 * broadcast through the blocking notifier chain; on notifier failure the
 * previous action is restored (rollback).
 */
static int mlx5_devlink_trap_action_set(struct devlink *devlink,
					const struct devlink_trap *trap,
					enum devlink_trap_action action,
					struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	enum devlink_trap_action action_orig;
	struct mlx5_devlink_trap *dl_trap;

	if (is_mdev_switchdev_mode(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
	dl_trap = mlx5_find_trap_by_id(dev, trap->id);
	mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
	/* Only DROP and TRAP are meaningful for this driver. */
	if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
	/* No-op if the action is unchanged. */
	if (action == dl_trap->trap.action)
	action_orig = dl_trap->trap.action;
	dl_trap->trap.action = action;
	err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
	/* Notifier failed: roll back to the previous action. */
	dl_trap->trap.action = action_orig;
/* devlink operations table for mlx5: eswitch/rate/port-function ops are
 * compiled in only with CONFIG_MLX5_ESWITCH, SF port ops only with
 * CONFIG_MLX5_SF_MANAGER. Reload supports driver reinit and FW activation
 * (optionally limited to no-reset, i.e. live patch).
 */
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
	.port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
	.port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
	.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
	.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
	.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
	.rate_node_new = mlx5_esw_devlink_rate_node_new,
	.rate_node_del = mlx5_esw_devlink_rate_node_del,
	.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_new = mlx5_devlink_sf_port_new,
	.port_del = mlx5_devlink_sf_port_del,
	.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
	.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
	.flash_update = mlx5_devlink_flash_update,
	.info_get = mlx5_devlink_info_get,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = mlx5_devlink_reload_down,
	.reload_up = mlx5_devlink_reload_up,
	.trap_init = mlx5_devlink_trap_init,
	.trap_fini = mlx5_devlink_trap_fini,
	.trap_action_set = mlx5_devlink_trap_action_set,
/* Deliver a trapped packet (@skb) for @trap_id to devlink. Packets are
 * only reported when the trap's current action is TRAP; an unknown ID is
 * logged as an error, a non-TRAP action only as debug.
 */
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
			      struct devlink_port *dl_port)
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
	mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
	if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
		mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
			      dl_trap->trap.action);
	devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
/* Count how many registered traps currently have the TRAP action. */
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
	struct mlx5_devlink_trap *dl_trap;

	list_for_each_entry(dl_trap, &dev->priv.traps, list)
		if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
/* Look up the current action of trap @trap_id and return it via @action.
 * An unknown ID is logged as an error.
 */
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
				  enum devlink_trap_action *action)
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
	mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
	*action = dl_trap->trap.action;
/* Allocate a devlink instance whose private area holds the mlx5_core_dev. */
struct devlink *mlx5_devlink_alloc(struct device *dev)
	return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev),
/* Release a devlink instance obtained from mlx5_devlink_alloc(). */
void mlx5_devlink_free(struct devlink *devlink)
	devlink_free(devlink);
/* Validate the "flow_steering_mode" param string. Accepts "dmfs" always;
 * "smfs" only when software-managed steering is supported by the device
 * and the eswitch is not in offloads mode. Any other string is rejected.
 */
static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
					 union devlink_param_value val,
					 struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	char *value = val.vstr;

	if (!strcmp(value, "dmfs")) {
	} else if (!strcmp(value, "smfs")) {
		eswitch_mode = mlx5_eswitch_mode(dev);
		smfs_cap = mlx5_fs_dr_is_supported(dev);
		NL_SET_ERR_MSG_MOD(extack,
				   "Software managed steering is not supported by current device");
		else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Software managed steering is not supported when eswitch offloads enabled.");
		NL_SET_ERR_MSG_MOD(extack,
				   "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
/* Apply the validated "flow_steering_mode" param: map the string to the
 * steering-mode enum and store it on the steering context ("smfs" ->
 * SMFS, anything else -> DMFS — input was already validated).
 */
static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
				    struct devlink_param_gset_ctx *ctx)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	enum mlx5_flow_steering_mode mode;

	if (!strcmp(ctx->val.vstr, "smfs"))
		mode = MLX5_FLOW_STEERING_MODE_SMFS;
		mode = MLX5_FLOW_STEERING_MODE_DMFS;
	dev->priv.steering->mode = mode;
/* Report the current flow-steering mode as its param string
 * ("smfs"/"dmfs"), the inverse mapping of mlx5_devlink_fs_mode_set().
 */
static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
				    struct devlink_param_gset_ctx *ctx)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
		strcpy(ctx->val.vstr, "smfs");
		strcpy(ctx->val.vstr, "dmfs");
/* Validate the generic "enable_roce" param: enabling requires the RoCE
 * capability, and RoCE cannot be reconfigured on a multi-port slave or
 * while LAG is active.
 */
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !MLX5_CAP_GEN(dev, roce)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
	if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
464 #ifdef CONFIG_MLX5_ESWITCH
/* Validate the "fdb_large_groups" param: accepted range is 1-1024. */
static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
						 union devlink_param_value val,
						 struct netlink_ext_ack *extack)
	int group_num = val.vu32;

	if (group_num < 1 || group_num > 1024) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported group number, supported range is 1-1024");
/* Set the "esw_port_metadata" param: toggle vport match-metadata on the
 * eswitch. Only meaningful when this function is the eswitch manager.
 */
static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
					      struct devlink_param_gset_ctx *ctx)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_ESWITCH_MANAGER(dev))
	return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
/* Get the "esw_port_metadata" param: report whether vport match-metadata
 * is currently enabled on the eswitch (eswitch manager only).
 */
static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
					      struct devlink_param_gset_ctx *ctx)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_ESWITCH_MANAGER(dev))
	ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
/* Validate the "esw_port_metadata" param: requires eswitch-manager
 * capability and that the eswitch is not in switchdev (offloads) mode.
 */
static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
						   union devlink_param_value val,
						   struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_ESWITCH_MANAGER(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
	esw_mode = mlx5_eswitch_mode(dev);
	if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-Switch must either disabled or non switchdev mode");
/* Set the generic "enable_remote_dev_reset" param: forward the bool to
 * the FW-reset layer's remote-reset enable flag.
 */
static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
/* Get the generic "enable_remote_dev_reset" param from the FW-reset layer. */
static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
/* Always-registered devlink parameters: flow steering mode (runtime),
 * RoCE enable (driverinit), the eswitch-only FDB group count and port
 * metadata params, and remote device reset (runtime).
 */
static const struct devlink_param mlx5_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
			     mlx5_devlink_fs_mode_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
			     "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     mlx5_devlink_large_group_num_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
			     "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlx5_devlink_esw_port_metadata_get,
			     mlx5_devlink_esw_port_metadata_set,
			     mlx5_devlink_esw_port_metadata_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      mlx5_devlink_enable_remote_dev_reset_get,
			      mlx5_devlink_enable_remote_dev_reset_set, NULL),
/* Seed driverinit parameter values from current device state: the active
 * flow-steering mode, the RoCE capability, and (eswitch builds) the
 * default FDB group count plus whether vport match-metadata is enabled.
 */
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;

	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
		strcpy(value.vstr, "dmfs");
		strcpy(value.vstr, "smfs");
	devlink_param_driverinit_value_set(devlink,
					   MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
	value.vbool = MLX5_CAP_GEN(dev, roce);
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
#ifdef CONFIG_MLX5_ESWITCH
	value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	devlink_param_driverinit_value_set(devlink,
					   MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
			dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	devlink_param_driverinit_value_set(devlink,
					   MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
/* Generic "enable_eth" driverinit param, registered only when Ethernet is
 * supported (see mlx5_devlink_eth_param_register()).
 */
static const struct devlink_param enable_eth_param =
	DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
/* Register, seed and publish the "enable_eth" param; a no-op on devices
 * without Ethernet support.
 */
static int mlx5_devlink_eth_param_register(struct devlink *devlink)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;

	if (!mlx5_eth_supported(dev))
	err = devlink_param_register(devlink, &enable_eth_param);
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
	devlink_param_publish(devlink, &enable_eth_param);
/* Unpublish and unregister "enable_eth"; mirrors the register path's
 * mlx5_eth_supported() gate so unregister stays balanced.
 */
static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_eth_supported(dev))
	devlink_param_unpublish(devlink, &enable_eth_param);
	devlink_param_unregister(devlink, &enable_eth_param);
/* Validate "enable_rdma": enabling requires RDMA support on the device. */
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !mlx5_rdma_supported(dev))
/* Generic "enable_rdma" driverinit param with a validate-only callback. */
static const struct devlink_param enable_rdma_param =
	DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_rdma_validate);
/* Register, seed and publish the "enable_rdma" param; skipped when
 * InfiniBand is not built or this function manages the eswitch.
 */
static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;

	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
	err = devlink_param_register(devlink, &enable_rdma_param);
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
	devlink_param_publish(devlink, &enable_rdma_param);
/* Unpublish and unregister "enable_rdma"; same gate as the register path. */
static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
	devlink_param_unpublish(devlink, &enable_rdma_param);
	devlink_param_unregister(devlink, &enable_rdma_param);
/* Generic "enable_vnet" driverinit param, registered only when the device
 * supports vnet (see mlx5_devlink_vnet_param_register()).
 */
static const struct devlink_param enable_vnet_param =
	DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
695 static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
697 struct mlx5_core_dev *dev = devlink_priv(devlink);
698 union devlink_param_value value;
701 if (!mlx5_vnet_supported(dev))
704 err = devlink_param_register(devlink, &enable_vnet_param);
709 devlink_param_driverinit_value_set(devlink,
710 DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
712 devlink_param_publish(devlink, &enable_rdma_param);
/* Unpublish and unregister "enable_vnet"; mirrors the register path's
 * mlx5_vnet_supported() gate.
 */
static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_vnet_supported(dev))
	devlink_param_unpublish(devlink, &enable_vnet_param);
	devlink_param_unregister(devlink, &enable_vnet_param);
/* Register the auxiliary-device enable params (eth, rdma, vnet) in order,
 * unwinding the earlier registrations on failure (goto-style cleanup:
 * rdma then eth are unregistered on the error path).
 */
static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
	err = mlx5_devlink_eth_param_register(devlink);
	err = mlx5_devlink_rdma_param_register(devlink);
	err = mlx5_devlink_vnet_param_register(devlink);
	/* Error unwind: undo in reverse order of registration. */
	mlx5_devlink_rdma_param_unregister(devlink);
	mlx5_devlink_eth_param_unregister(devlink);
/* Unregister the auxiliary-device params in reverse registration order. */
static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
	mlx5_devlink_vnet_param_unregister(devlink);
	mlx5_devlink_rdma_param_unregister(devlink);
	mlx5_devlink_eth_param_unregister(devlink);
/* Helper to declare a generic DROP trap in a generic group, reporting the
 * ingress port as metadata.
 */
#define MLX5_TRAP_DROP(_id, _group_id) \
	DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
			     DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
/* Traps exposed by this driver: both are L2 drop traps. */
static const struct devlink_trap mlx5_traps_arr[] = {
	MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
	MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
/* Trap groups used by mlx5_traps_arr (L2 drops only, no policer). */
static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
/* Register the trap groups and then the traps with devlink; on trap
 * registration failure the groups are unregistered again.
 */
static int mlx5_devlink_traps_register(struct devlink *devlink)
	struct mlx5_core_dev *core_dev = devlink_priv(devlink);

	err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
					   ARRAY_SIZE(mlx5_trap_groups_arr));
	err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
	/* Error unwind: drop the groups registered above. */
	devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
				       ARRAY_SIZE(mlx5_trap_groups_arr));
/* Unregister traps first, then their groups (reverse of registration). */
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
	devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
	devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
				       ARRAY_SIZE(mlx5_trap_groups_arr));
/* Top-level devlink setup: register the instance, register and seed the
 * base params and publish them, then the auxdev params and the traps.
 * Failures unwind in reverse order (auxdev params, base params, instance).
 */
int mlx5_devlink_register(struct devlink *devlink)
	err = devlink_register(devlink);
	err = devlink_params_register(devlink, mlx5_devlink_params,
				      ARRAY_SIZE(mlx5_devlink_params));
	mlx5_devlink_set_params_init_values(devlink);
	devlink_params_publish(devlink);
	err = mlx5_devlink_auxdev_params_register(devlink);
	err = mlx5_devlink_traps_register(devlink);
	/* Error unwind, reverse order of the steps above. */
	mlx5_devlink_auxdev_params_unregister(devlink);
	devlink_params_unregister(devlink, mlx5_devlink_params,
				  ARRAY_SIZE(mlx5_devlink_params));
	devlink_unregister(devlink);
/* Tear down everything mlx5_devlink_register() set up, in reverse order:
 * traps, auxdev params, then unpublish/unregister the base params and
 * finally the devlink instance itself.
 */
void mlx5_devlink_unregister(struct devlink *devlink)
	mlx5_devlink_traps_unregister(devlink);
	mlx5_devlink_auxdev_params_unregister(devlink);
	devlink_params_unpublish(devlink);
	devlink_params_unregister(devlink, mlx5_devlink_params,
				  ARRAY_SIZE(mlx5_devlink_params));
	devlink_unregister(devlink);