kfree(dl_trap);
}
+static int mlx5_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum devlink_trap_action action_orig;
+ struct mlx5_devlink_trap *dl_trap;
+ int err = 0;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (action == dl_trap->trap.action)
+ goto out;
+
+ action_orig = dl_trap->trap.action;
+ dl_trap->trap.action = action;
+ err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
+ &dl_trap->trap);
+ if (err)
+ dl_trap->trap.action = action_orig;
+out:
+ return err;
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
.reload_up = mlx5_devlink_reload_up,
.trap_init = mlx5_devlink_trap_init,
.trap_fini = mlx5_devlink_trap_fini,
+ .trap_action_set = mlx5_devlink_trap_action_set,
};
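
For illustration only (not part of the patch): a minimal sketch of the consumer side of the MLX5_DRIVER_EVENT_TYPE_TRAP event raised above. The handler name and the payload type (a trap context carrying the trap action, matching how dl_trap->trap is used in mlx5_devlink_trap_action_set) are assumptions, since neither is defined in this excerpt; the usual <linux/notifier.h> and mlx5 driver headers are assumed.

static int example_trap_event(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct mlx5_trap_ctx *trap_ctx = data;	/* assumed payload type */

	if (event != MLX5_DRIVER_EVENT_TYPE_TRAP)
		return NOTIFY_DONE;

	switch (trap_ctx->action) {
	case DEVLINK_TRAP_ACTION_TRAP:
		/* start delivering the trapped packets to the host */
		break;
	case DEVLINK_TRAP_ACTION_DROP:
		/* stop reporting; matched packets are dropped */
		break;
	default:
		break;
	}

	/* Return 0: mlx5_devlink_trap_action_set() above treats any non-zero
	 * return from the chain as failure and rolls the action back.
	 */
	return 0;
}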
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct mlx5_pme_stats pme_stats;
/*pcie_core*/
struct work_struct pcie_core_work;
+ /* driver notifier chain for sw events */
+ struct blocking_notifier_head sw_nh;
};
static const char *eqe_type_str(u8 type)
return -ENOMEM;
}
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+ BLOCKING_INIT_NOTIFIER_HEAD(&events->sw_nh);
return 0;
}
{
return atomic_notifier_call_chain(&events->fw_nh, event, data);
}
+
+/* This API is used only for processing and forwarding driver-specific
+ * events to mlx5 consumers.
+ */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_register(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_unregister(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_call_chain(&events->sw_nh, event, data);
+}
MLX5_EVENT_TYPE_MAX = 0x100,
};
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+};
+
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
MAX_MR_CACHE_ENTRIES
};
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimized for event queue dispatching.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue to the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+/* Blocking event notifier used to forward SW events; intended for the slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
+
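
As a usage illustration (again, not part of the patch): a consumer would register on the blocking chain during init and unregister on teardown. The notifier block, handler (from the sketch earlier) and the init/cleanup functions below are hypothetical names.

static struct notifier_block example_sw_nb = {
	.notifier_call = example_trap_event,	/* hypothetical handler, see sketch above */
};

static int example_init(struct mlx5_core_dev *dev)
{
	/* Blocking notifier chains take an rw_semaphore internally, so both
	 * registration and mlx5_blocking_notifier_call_chain() may sleep;
	 * call from process context only.
	 */
	return mlx5_blocking_notifier_register(dev, &example_sw_nb);
}

static void example_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_blocking_notifier_unregister(dev, &example_sw_nb);
}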
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);