1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
11 #include "en_accel/macsec.h"
12 #include "en_accel/macsec_fs.h"
14 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
15 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
/* ASO event-arm flags and ASO field offsets used when polling/arming the
 * MACsec ASO object.
 * NOTE(review): this listing is gappy — intermediate members are elided.
 */
17 enum mlx5_macsec_aso_event_arm {
18 MLX5E_ASO_EPN_ARM = BIT(0),
22 MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
/* Handle passed to async ASO work; ties a request back to its macsec ctx. */
25 struct mlx5e_macsec_handle {
26 struct mlx5e_macsec *macsec;
/* Output/input descriptors for an ASO query operation (members elided). */
35 struct mlx5e_macsec_aso_out {
40 struct mlx5e_macsec_aso_in {
/* Extended-packet-number tracking state kept per SA (members elided). */
45 struct mlx5e_macsec_epn_state {
/* Work item queued on macsec->wq for deferred/async handling. */
51 struct mlx5e_macsec_async_work {
52 struct mlx5e_macsec *macsec;
53 struct mlx5_core_dev *mdev;
54 struct work_struct work;
/* Per-SA driver state: hashed by sci (see rhash_sci), freed via RCU.
 * NOTE(review): several members (sci, fs_id, enc_key_id, ...) are elided
 * from this listing but are referenced by the functions below.
 */
58 struct mlx5e_macsec_sa {
68 struct rhash_head hash;
70 union mlx5e_macsec_rule *macsec_rule;
71 struct rcu_head rcu_head;
72 struct mlx5e_macsec_epn_state epn_state;
/* Forward declaration so the xarray element can point at an Rx SC. */
75 struct mlx5e_macsec_rx_sc;
/* Entry stored in macsec->sc_xarray: maps an allocated fs_id to its Rx SC. */
76 struct mlx5e_macsec_rx_sc_xarray_element {
78 struct mlx5e_macsec_rx_sc *rx_sc;
/* Per-Rx-SC state: up to MACSEC_NUM_AN SAs, list linkage on the device's
 * rx_sc list (RCU-traversed), the xarray element holding its fs_id, and a
 * metadata dst carrying the sci to the datapath. Freed via RCU.
 */
81 struct mlx5e_macsec_rx_sc {
84 struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
85 struct list_head rx_sc_list_element;
86 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
87 struct metadata_dst *md_dst;
88 struct rcu_head rcu_head;
/* UMR scratch buffer DMA-mapped for ASO reads (see mlx5e_macsec_aso_reg_mr). */
91 struct mlx5e_macsec_umr {
93 u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
/* ASO engine state shared by all SAs of this device. */
97 struct mlx5e_macsec_aso {
99 struct mlx5_aso *maso;
100 /* Protects macsec ASO */
101 struct mutex aso_lock;
103 struct mlx5e_macsec_umr *umr;
/* rhashtable parameters: key is the SA's sci, head is sa->hash. */
108 static const struct rhashtable_params rhash_sci = {
109 .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
110 .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
111 .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
112 .automatic_shrinking = true,
/* Per-offloaded-netdev context: Tx SAs, Rx SC list, and the last-seen MAC
 * address (used by mlx5e_macsec_upd_secy to detect address changes).
 */
116 struct mlx5e_macsec_device {
117 const struct net_device *netdev;
118 struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
119 struct list_head macsec_rx_sc_list_head;
120 unsigned char *dev_addr;
121 struct list_head macsec_device_list_element;
/* Top-level MACsec offload context hanging off mlx5e_priv. */
124 struct mlx5e_macsec {
125 struct list_head macsec_device_list_head;
127 struct mlx5e_macsec_fs *macsec_fs;
128 struct mutex lock; /* Protects mlx5e_macsec internal contexts */
130 /* Tx sci -> fs id mapping handling */
131 struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
133 /* Rx fs_id -> rx_sc mapping */
134 struct xarray sc_xarray;
136 struct mlx5_core_dev *mdev;
139 struct mlx5e_macsec_stats stats;
142 struct mlx5e_macsec_aso aso;
144 struct notifier_block nb;
145 struct workqueue_struct *wq;
/* Attributes used to create/modify the firmware MACsec object
 * (members such as next_pn, sci, replay_window are elided here).
 */
148 struct mlx5_macsec_obj_attrs {
154 struct mlx5e_macsec_epn_state epn_state;
/* Parameters for building an ASO WQE control segment
 * (see macsec_aso_build_wqe_ctrl_seg).
 */
161 struct mlx5_aso_ctrl_param {
163 u8 condition_0_operand;
164 u8 condition_1_operand;
165 u8 condition_0_offset;
166 u8 condition_1_offset;
168 u8 condition_operand;
169 u32 condition_0_data;
170 u32 condition_0_mask;
171 u32 condition_1_data;
172 u32 condition_1_mask;
/* Allocate the UMR scratch context, DMA-map it bidirectionally and create an
 * mkey on aso->pdn so the ASO engine can read/write it.
 * NOTE(review): alloc-failure checks, error returns and the umr->mkey
 * bookkeeping between these lines are elided from this listing.
 */
177 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
179 struct mlx5e_macsec_umr *umr;
180 struct device *dma_device;
184 umr = kzalloc(sizeof(*umr), GFP_KERNEL);
190 dma_device = &mdev->pdev->dev;
191 dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
192 err = dma_mapping_error(dma_device, dma_addr);
194 mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
198 err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
200 mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
204 umr->dma_addr = dma_addr;
/* Error path: undo the DMA mapping created above. */
211 dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Tear-down mirror of mlx5e_macsec_aso_reg_mr: destroy the mkey, unmap the
 * UMR context (the kfree of umr is elided from this listing).
 */
217 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
219 struct mlx5e_macsec_umr *umr = aso->umr;
221 mlx5_core_destroy_mkey(mdev, umr->mkey);
222 dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Program replay protection into the ASO context: no-op unless
 * attrs->replay_protect is set; maps the requested replay_window (256/128/
 * 64/32 bits, per the case labels elided here) to the HW window size and
 * switches the ASO mode to replay protection. The default/unsupported-window
 * error return is elided from this listing.
 */
226 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
230 if (!attrs->replay_protect)
233 switch (attrs->replay_window) {
235 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
238 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
241 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
244 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
249 MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
250 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
/* Create the firmware MACsec general object for one SA and return its id in
 * *macsec_obj_id. Fills the embedded ASO context (next_pn, validity, inc-SN
 * mode, optional replay protection) and, when EPN is enabled, arms the EPN
 * event and programs epn_msb/overlap, the ssci as sci, and the 3-dword salt.
 */
255 static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
256 struct mlx5_macsec_obj_attrs *attrs,
260 u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
261 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
266 obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
267 aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
269 MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
270 MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
271 MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
272 MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
273 MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
276 if (attrs->epn_state.epn_enabled) {
280 MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
281 MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
282 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
283 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
284 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
285 salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
/* Copy the 12-byte salt as 3 dwords in reversed dword order. */
286 for (i = 0; i < 3 ; i++)
287 memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
/* Non-EPN: program the full 64-bit sci directly (else branch elided). */
289 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
292 MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
294 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
296 err = macsec_set_replay_protection(attrs, aso_ctx);
301 /* general object fields set */
302 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
303 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
305 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
308 "MACsec offload: Failed to create MACsec object (err = %d)\n",
313 *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
/* Destroy a firmware MACsec general object by id. The command status is
 * deliberately not checked — destroy is best-effort on teardown paths.
 */
318 static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
320 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
321 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
323 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
324 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
325 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
327 mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Undo mlx5e_macsec_init_sa for one SA: unhash a Tx SA from sci_hash, then
 * remove the steering rule and destroy the firmware object. Safe to call on
 * an SA whose rule was never installed (macsec_rule NULL → early return).
 */
330 static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
331 struct mlx5e_macsec_sa *sa,
334 int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
335 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
337 if ((is_tx) && sa->fs_id) {
338 /* Make sure ongoing datapath readers see a valid SA */
339 rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
343 if (!sa->macsec_rule)
346 mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
347 mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
348 sa->macsec_rule = NULL;
/* Bring one SA online: create the firmware MACsec object, install the
 * encrypt/decrypt steering rule, and (for Tx, per elided condition around
 * line 402) insert the SA into sci_hash for datapath lookup.
 */
351 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
352 struct mlx5e_macsec_sa *sa,
356 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
357 struct mlx5e_macsec *macsec = priv->macsec;
358 struct mlx5_macsec_rule_attrs rule_attrs;
359 struct mlx5_core_dev *mdev = priv->mdev;
360 struct mlx5_macsec_obj_attrs obj_attrs;
361 union mlx5e_macsec_rule *macsec_rule;
362 struct macsec_key *key;
365 obj_attrs.next_pn = sa->next_pn;
366 obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
367 obj_attrs.enc_key_id = sa->enc_key_id;
368 obj_attrs.encrypt = encrypt;
369 obj_attrs.aso_pdn = macsec->aso.pdn;
370 obj_attrs.epn_state = sa->epn_state;
372 key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
374 if (sa->epn_state.epn_enabled) {
375 obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
376 cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
378 memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
381 obj_attrs.replay_window = ctx->secy->replay_window;
382 obj_attrs.replay_protect = ctx->secy->replay_protect;
384 err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
388 rule_attrs.macsec_obj_id = sa->macsec_obj_id;
389 rule_attrs.sci = sa->sci;
390 rule_attrs.assoc_num = sa->assoc_num;
391 rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
392 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
394 macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
397 goto destroy_macsec_object;
400 sa->macsec_rule = macsec_rule;
403 err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
405 goto destroy_macsec_object_and_rule;
/* NOTE(review): cleanup_sa already destroys the object when a rule exists;
 * whether control falls through from the first label into the second depends
 * on lines elided from this listing — verify there is a return between them.
 */
410 destroy_macsec_object_and_rule:
411 mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
412 destroy_macsec_object:
413 mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
/* RCU-walk a device's Rx SC list and return the SC matching sci, or NULL
 * (the return statements are elided from this listing).
 */
418 static struct mlx5e_macsec_rx_sc *
419 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
421 struct mlx5e_macsec_rx_sc *iter;
423 list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
424 if (iter->sci == sci)
/* Toggle an Rx SA between active/inactive: no-op when unchanged; deactivation
 * tears the SA down via cleanup_sa, activation re-runs init_sa (and rolls
 * rx_sa->active back to false on failure).
 */
431 static int macsec_rx_sa_active_update(struct macsec_context *ctx,
432 struct mlx5e_macsec_sa *rx_sa,
435 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
436 struct mlx5e_macsec *macsec = priv->macsec;
439 if (rx_sa->active == active)
442 rx_sa->active = active;
444 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
448 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
450 rx_sa->active = false;
/* Gatekeeper for offload: reject SecY configurations the HW path does not
 * support — non-strict validation, non-default ICV length, protect_frames
 * off, or Tx encryption disabled. Returns true only when all checks pass
 * (the individual `return false`/`return true` lines are elided here).
 */
455 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
457 const struct net_device *netdev = ctx->netdev;
458 const struct macsec_secy *secy = ctx->secy;
460 if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
462 "MACsec offload is supported only when validate_frame is in strict mode\n");
466 if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
467 netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
468 MACSEC_DEFAULT_ICV_LEN);
472 if (!secy->protect_frames) {
474 "MACsec offload is supported only when protect_frames is set\n");
478 if (!ctx->secy->tx_sc.encrypt) {
479 netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
/* Find the per-netdev device context whose netdev matches ctx->secy->netdev;
 * returns NULL when absent (return lines elided). List is RCU-traversed but
 * all callers shown here also hold macsec->lock.
 */
486 static struct mlx5e_macsec_device *
487 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
488 const struct macsec_context *ctx)
490 struct mlx5e_macsec_device *iter;
491 const struct list_head *list;
493 list = &macsec->macsec_device_list_head;
494 list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
495 if (iter->netdev == ctx->secy->netdev)
/* Record EPN state on the SA: copy the key salt, mark EPN enabled, capture
 * the upper PN half, and set `overlap` when the lower half has crossed the
 * mid-scope threshold (used for HW EPN window tracking).
 * NOTE(review): the ssci parameter's use is on elided lines — presumably
 * stored into epn_state; verify against the full source.
 */
502 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
503 const pn_t *next_pn_halves, ssci_t ssci)
505 struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
508 sa->salt = key->salt;
509 epn_state->epn_enabled = 1;
510 epn_state->epn_msb = next_pn_halves->upper;
511 epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
/* macsec_ops .mdo_add_txsa: allocate driver SA state, create the encryption
 * key, and — unless the SecY is non-operational or this SA is not the
 * encoding SA (condition partially elided) — install the HW object + rule.
 * All under macsec->lock.
 */
514 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
516 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
517 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
518 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
519 const struct macsec_secy *secy = ctx->secy;
520 struct mlx5e_macsec_device *macsec_device;
521 struct mlx5_core_dev *mdev = priv->mdev;
522 u8 assoc_num = ctx->sa.assoc_num;
523 struct mlx5e_macsec_sa *tx_sa;
524 struct mlx5e_macsec *macsec;
527 mutex_lock(&priv->macsec->lock);
529 macsec = priv->macsec;
530 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
531 if (!macsec_device) {
532 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
537 if (macsec_device->tx_sa[assoc_num]) {
538 netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
543 tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
549 tx_sa->active = ctx_tx_sa->active;
550 tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
551 tx_sa->sci = secy->sci;
552 tx_sa->assoc_num = assoc_num;
/* EPN-only setup (guard condition elided from this listing). */
555 update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
558 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
559 MLX5_ACCEL_OBJ_MACSEC_KEY,
564 macsec_device->tx_sa[assoc_num] = tx_sa;
565 if (!secy->operational ||
566 assoc_num != tx_sc->encoding_sa ||
570 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
572 goto destroy_encryption_key;
574 mutex_unlock(&macsec->lock);
/* Error path: detach the SA slot and release the DEK. */
578 destroy_encryption_key:
579 macsec_device->tx_sa[assoc_num] = NULL;
580 mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
584 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_upd_txsa: only the `active` flag may change — updating the
 * PN is explicitly rejected. Activation installs the HW state for the
 * encoding SA; deactivation removes it via cleanup_sa.
 */
589 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
591 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
592 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
593 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
594 struct mlx5e_macsec_device *macsec_device;
595 u8 assoc_num = ctx->sa.assoc_num;
596 struct mlx5e_macsec_sa *tx_sa;
597 struct mlx5e_macsec *macsec;
598 struct net_device *netdev;
601 mutex_lock(&priv->macsec->lock);
603 macsec = priv->macsec;
604 netdev = ctx->netdev;
605 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
606 if (!macsec_device) {
607 netdev_err(netdev, "MACsec offload: Failed to find device context\n");
612 tx_sa = macsec_device->tx_sa[assoc_num];
614 netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
619 if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
620 netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
626 if (tx_sa->active == ctx_tx_sa->active)
629 tx_sa->active = ctx_tx_sa->active;
/* Non-encoding SAs carry no HW state; nothing more to do. */
630 if (tx_sa->assoc_num != tx_sc->encoding_sa)
633 if (ctx_tx_sa->active) {
634 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
638 if (!tx_sa->macsec_rule) {
643 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
646 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_del_txsa: tear down HW state, release the DEK, and free
 * the SA slot (the kfree/kfree_rcu of tx_sa is elided from this listing).
 */
651 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
653 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
654 struct mlx5e_macsec_device *macsec_device;
655 u8 assoc_num = ctx->sa.assoc_num;
656 struct mlx5e_macsec_sa *tx_sa;
657 struct mlx5e_macsec *macsec;
660 mutex_lock(&priv->macsec->lock);
661 macsec = priv->macsec;
662 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
663 if (!macsec_device) {
664 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
669 tx_sa = macsec_device->tx_sa[assoc_num];
671 netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
676 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
677 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
679 macsec_device->tx_sa[assoc_num] = NULL;
682 mutex_unlock(&macsec->lock);
/* Datapath helper: look up a Tx SA by sci and return its fs_id (0 when not
 * found — the rcu_read_lock/unlock and return are elided from this listing).
 */
687 static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
689 struct mlx5e_macsec_sa *macsec_sa;
693 macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
695 fs_id = macsec_sa->fs_id;
/* macsec_ops .mdo_add_rxsc: allocate the Rx SC, reserve an fs_id from
 * sc_xarray (datapath key, range 1..MLX5_MACEC_RX_FS_ID_MAX), allocate the
 * MACsec metadata dst carrying the sci, and publish the SC on the device's
 * RCU list. Error paths unwind in reverse order.
 * NOTE(review): the "destroy_sc_xarray_elemenet" label typo exists in the
 * real source; it is code and cannot be renamed in a comment-only pass.
 */
701 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
703 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
704 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
705 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
706 struct mlx5e_macsec_device *macsec_device;
707 struct mlx5e_macsec_rx_sc *rx_sc;
708 struct list_head *rx_sc_list;
709 struct mlx5e_macsec *macsec;
712 mutex_lock(&priv->macsec->lock);
713 macsec = priv->macsec;
714 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
715 if (!macsec_device) {
716 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
721 rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
722 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
724 netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
730 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
736 sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
737 if (!sc_xarray_element) {
742 sc_xarray_element->rx_sc = rx_sc;
/* fs_id 0 is reserved (lookup miss); allocate from 1 upward. */
743 err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
744 XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
747 netdev_err(ctx->netdev,
748 "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
749 MLX5_MACEC_RX_FS_ID_MAX);
750 goto destroy_sc_xarray_elemenet;
753 rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
754 if (!rx_sc->md_dst) {
759 rx_sc->sci = ctx_rx_sc->sci;
760 rx_sc->active = ctx_rx_sc->active;
761 list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
763 rx_sc->sc_xarray_element = sc_xarray_element;
764 rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
765 mutex_unlock(&macsec->lock);
/* Error unwind: release the fs_id, then the xarray element. */
770 xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
771 destroy_sc_xarray_elemenet:
772 kfree(sc_xarray_element);
777 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_upd_rxsc: when the SC's active flag changes, propagate the
 * combined (sa->active && sc->active) state to every existing Rx SA.
 */
782 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
784 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
785 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
786 struct mlx5e_macsec_device *macsec_device;
787 struct mlx5e_macsec_rx_sc *rx_sc;
788 struct mlx5e_macsec_sa *rx_sa;
789 struct mlx5e_macsec *macsec;
790 struct list_head *list;
794 mutex_lock(&priv->macsec->lock);
796 macsec = priv->macsec;
797 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
798 if (!macsec_device) {
799 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
804 list = &macsec_device->macsec_rx_sc_list_head;
805 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
811 if (rx_sc->active == ctx_rx_sc->active)
814 rx_sc->active = ctx_rx_sc->active;
815 for (i = 0; i < MACSEC_NUM_AN; ++i) {
816 rx_sa = rx_sc->rx_sa[i];
820 err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
826 mutex_unlock(&macsec->lock);
/* Full teardown of one Rx SC: destroy every SA (HW state + DEK), then unlink
 * the SC from the RCU list, erase its fs_id from the xarray (which hides the
 * SC from the datapath — see comment below), and free the metadata dst and
 * xarray element. Final kfree/kfree_rcu of rx_sc is elided from this listing.
 */
831 static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
833 struct mlx5e_macsec_sa *rx_sa;
836 for (i = 0; i < MACSEC_NUM_AN; ++i) {
837 rx_sa = rx_sc->rx_sa[i];
841 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
842 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
845 rx_sc->rx_sa[i] = NULL;
848 /* At this point the relevant MACsec offload Rx rule already removed at
849 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
850 * Rx related data propagating using xa_erase which uses rcu to sync,
851 * once fs_id is erased then this rx_sc is hidden from datapath.
853 list_del_rcu(&rx_sc->rx_sc_list_element);
854 xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
855 metadata_dst_free(rx_sc->md_dst);
856 kfree(rx_sc->sc_xarray_element);
/* macsec_ops .mdo_del_rxsc: look up the SC by ctx->rx_sc->sci and tear it
 * down via macsec_del_rxsc_ctx.
 */
860 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
862 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
863 struct mlx5e_macsec_device *macsec_device;
864 struct mlx5e_macsec_rx_sc *rx_sc;
865 struct mlx5e_macsec *macsec;
866 struct list_head *list;
869 mutex_lock(&priv->macsec->lock);
871 macsec = priv->macsec;
872 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
873 if (!macsec_device) {
874 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
879 list = &macsec_device->macsec_rx_sc_list_head;
880 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
/* NOTE(review): the lookup used ctx->rx_sc->sci but the error log reads
 * ctx->sa.rx_sa->sc->sci — ctx->sa.rx_sa may not be populated for a del_rxsc
 * callback; verify against the full source (possible NULL dereference in the
 * logging path).
 */
882 netdev_err(ctx->netdev,
883 "MACsec offload rx_sc sci %lld doesn't exist\n",
884 ctx->sa.rx_sa->sc->sci);
889 macsec_del_rxsc_ctx(macsec, rx_sc);
891 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_add_rxsa: allocate the SA, inherit the SC's fs_id for
 * steering, create the DEK, and install HW state when active (guard elided).
 */
896 static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
898 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
899 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
900 struct mlx5e_macsec_device *macsec_device;
901 struct mlx5_core_dev *mdev = priv->mdev;
902 u8 assoc_num = ctx->sa.assoc_num;
903 struct mlx5e_macsec_rx_sc *rx_sc;
904 sci_t sci = ctx_rx_sa->sc->sci;
905 struct mlx5e_macsec_sa *rx_sa;
906 struct mlx5e_macsec *macsec;
907 struct list_head *list;
910 mutex_lock(&priv->macsec->lock);
912 macsec = priv->macsec;
913 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
914 if (!macsec_device) {
915 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
920 list = &macsec_device->macsec_rx_sc_list_head;
921 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
923 netdev_err(ctx->netdev,
924 "MACsec offload rx_sc sci %lld doesn't exist\n",
925 ctx->sa.rx_sa->sc->sci);
930 if (rx_sc->rx_sa[assoc_num]) {
931 netdev_err(ctx->netdev,
932 "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
938 rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
944 rx_sa->active = ctx_rx_sa->active;
945 rx_sa->next_pn = ctx_rx_sa->next_pn;
947 rx_sa->assoc_num = assoc_num;
/* Rx steering is keyed by the SC's fs_id, shared by all its SAs. */
948 rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
951 update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
954 err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
955 MLX5_ACCEL_OBJ_MACSEC_KEY,
960 rx_sc->rx_sa[assoc_num] = rx_sa;
964 //TODO - add support for both authentication and encryption flows
965 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
967 goto destroy_encryption_key;
/* Error path: detach the SA slot and release the DEK. */
971 destroy_encryption_key:
972 rx_sc->rx_sa[assoc_num] = NULL;
973 mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
977 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_upd_rxsa: PN updates are rejected; only the active flag
 * may change, delegated to macsec_rx_sa_active_update.
 */
982 static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
984 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
985 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
986 struct mlx5e_macsec_device *macsec_device;
987 u8 assoc_num = ctx->sa.assoc_num;
988 struct mlx5e_macsec_rx_sc *rx_sc;
989 sci_t sci = ctx_rx_sa->sc->sci;
990 struct mlx5e_macsec_sa *rx_sa;
991 struct mlx5e_macsec *macsec;
992 struct list_head *list;
995 mutex_lock(&priv->macsec->lock);
997 macsec = priv->macsec;
998 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
999 if (!macsec_device) {
1000 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1005 list = &macsec_device->macsec_rx_sc_list_head;
1006 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1008 netdev_err(ctx->netdev,
1009 "MACsec offload rx_sc sci %lld doesn't exist\n",
1010 ctx->sa.rx_sa->sc->sci);
1015 rx_sa = rx_sc->rx_sa[assoc_num];
1017 netdev_err(ctx->netdev,
1018 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1024 if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
1025 netdev_err(ctx->netdev,
1026 "MACsec offload update RX sa %d PN isn't supported\n",
1032 err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
1034 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_del_rxsa: tear down HW state and the DEK for one Rx SA and
 * clear its slot (final kfree/kfree_rcu of rx_sa is elided from this listing).
 */
1039 static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
1041 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1042 struct mlx5e_macsec_device *macsec_device;
1043 sci_t sci = ctx->sa.rx_sa->sc->sci;
1044 struct mlx5e_macsec_rx_sc *rx_sc;
1045 u8 assoc_num = ctx->sa.assoc_num;
1046 struct mlx5e_macsec_sa *rx_sa;
1047 struct mlx5e_macsec *macsec;
1048 struct list_head *list;
1051 mutex_lock(&priv->macsec->lock);
1053 macsec = priv->macsec;
1054 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1055 if (!macsec_device) {
1056 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1061 list = &macsec_device->macsec_rx_sc_list_head;
1062 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1064 netdev_err(ctx->netdev,
1065 "MACsec offload rx_sc sci %lld doesn't exist\n",
1066 ctx->sa.rx_sa->sc->sci);
1071 rx_sa = rx_sc->rx_sa[assoc_num];
1073 netdev_err(ctx->netdev,
1074 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1080 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
1081 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
1083 rx_sc->rx_sa[assoc_num] = NULL;
1086 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_add_secy: validate the SecY for offload, enforce the
 * per-core device interface cap, then allocate and publish a device context
 * (netdev + snapshot of the MAC address used later to distinguish
 * changelink from set_mac_address in mlx5e_macsec_upd_secy).
 */
1091 static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
1093 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1094 const struct net_device *dev = ctx->secy->netdev;
1095 const struct net_device *netdev = ctx->netdev;
1096 struct mlx5e_macsec_device *macsec_device;
1097 struct mlx5e_macsec *macsec;
1100 if (!mlx5e_macsec_secy_features_validate(ctx))
1103 mutex_lock(&priv->macsec->lock);
1104 macsec = priv->macsec;
1105 if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
1106 netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
1110 if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
1111 netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
1112 MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
1117 macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
1118 if (!macsec_device) {
1123 macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
1124 if (!macsec_device->dev_addr) {
1125 kfree(macsec_device);
1130 macsec_device->netdev = dev;
1132 INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
1133 list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
1135 ++macsec->num_of_devices;
1137 mutex_unlock(&macsec->lock);
/* Re-anchor Rx offload state after a MAC address change: first remove every
 * installed Rx SA rule, then re-install rules for the active SAs, and finally
 * record the new address in the device context. Error handling between the
 * two passes is elided from this listing.
 */
1142 static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
1143 struct mlx5e_macsec_device *macsec_device)
1145 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1146 const struct net_device *dev = ctx->secy->netdev;
1147 struct mlx5e_macsec *macsec = priv->macsec;
1148 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1149 struct mlx5e_macsec_sa *rx_sa;
1150 struct list_head *list;
/* Pass 1: tear down all currently installed Rx rules. */
1154 list = &macsec_device->macsec_rx_sc_list_head;
1155 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1156 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1157 rx_sa = rx_sc->rx_sa[i];
1158 if (!rx_sa || !rx_sa->macsec_rule)
1161 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
/* Pass 2: re-install rules for every SA that is still active. */
1165 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1166 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1167 rx_sa = rx_sc->rx_sa[i];
1171 if (rx_sa->active) {
1172 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
1179 memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
1184 /* this function is called from 2 macsec ops functions:
1185 * macsec_set_mac_address – MAC address was changed, therefore we need to destroy
1186 * and create new Tx contexts(macsec object + steering).
1187 * macsec_changelink – in this case the tx SC or SecY may be changed, therefore need to
1188 * destroy Tx and Rx contexts(macsec object + steering)
1190 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1192 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1193 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1194 const struct net_device *dev = ctx->secy->netdev;
1195 struct mlx5e_macsec_device *macsec_device;
1196 struct mlx5e_macsec_sa *tx_sa;
1197 struct mlx5e_macsec *macsec;
1200 if (!mlx5e_macsec_secy_features_validate(ctx))
1203 mutex_lock(&priv->macsec->lock);
1205 macsec = priv->macsec;
1206 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1207 if (!macsec_device) {
1208 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1213 /* if the dev_addr hasn't changed, it means the callback is from macsec_changelink */
1214 if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1215 err = macsec_upd_secy_hw_address(ctx, macsec_device);
/* Rebuild Tx: tear down all SAs, then re-install only the active encoding SA. */
1220 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1221 tx_sa = macsec_device->tx_sa[i];
1225 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1228 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1229 tx_sa = macsec_device->tx_sa[i];
1233 if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1234 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
1241 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_del_secy: destroy every Tx SA (HW state + DEK), every Rx
 * SC, then unpublish and free the device context.
 */
1246 static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
1248 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1249 struct mlx5e_macsec_device *macsec_device;
1250 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1251 struct mlx5e_macsec_sa *tx_sa;
1252 struct mlx5e_macsec *macsec;
1253 struct list_head *list;
1257 mutex_lock(&priv->macsec->lock);
1258 macsec = priv->macsec;
1259 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1260 if (!macsec_device) {
1261 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1267 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1268 tx_sa = macsec_device->tx_sa[i];
1272 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1273 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
1275 macsec_device->tx_sa[i] = NULL;
1278 list = &macsec_device->macsec_rx_sc_list_head;
1279 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
1280 macsec_del_rxsc_ctx(macsec, rx_sc);
1282 kfree(macsec_device->dev_addr);
1283 macsec_device->dev_addr = NULL;
1285 list_del_rcu(&macsec_device->macsec_device_list_element);
1286 --macsec->num_of_devices;
1287 kfree(macsec_device);
1290 mutex_unlock(&macsec->lock);
/* Copy the SA's EPN tracking state into object-modify attributes
 * (used before mlx5e_macsec_modify_obj).
 */
1295 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1296 struct mlx5_macsec_obj_attrs *attrs)
1298 attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1299 attrs->epn_state.overlap = sa->epn_state.overlap;
/* Fill an ASO WQE control segment. When the UMR scratch buffer is mapped,
 * point the WQE at it (read-enabled) with its mkey; when `param` is given
 * (callers pass NULL for plain queries), program the bitwise data/mask and
 * condition operands. The early-return/else structure between these two
 * halves is elided from this listing.
 */
1302 static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
1303 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1304 struct mlx5_aso_ctrl_param *param)
1306 memset(aso_ctrl, 0, sizeof(*aso_ctrl));
1307 if (macsec_aso->umr->dma_addr) {
1308 aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
1309 aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
1310 aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
/* Pack the 4-bit/2-bit subfields as defined by the WQE layout. */
1316 aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
1317 aso_ctrl->condition_1_0_operand = param->condition_1_operand |
1318 param->condition_0_operand << 4;
1319 aso_ctrl->condition_1_0_offset = param->condition_1_offset |
1320 param->condition_0_offset << 4;
1321 aso_ctrl->data_offset_condition_operand = param->data_offset |
1322 param->condition_operand << 6;
1323 aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
1324 aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
1325 aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
1326 aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
1327 aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
1328 aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
/* Modify an existing MACsec object's EPN fields (epn_msb + epn_overlap):
 * query the object first, verify both bits are reported modifiable in
 * modify_field_select, then issue the modify command reusing `in`.
 */
1331 static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
1334 u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
1335 u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
1336 u64 modify_field_select = 0;
1340 /* General object fields set */
1341 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1342 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
1343 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
1344 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1346 mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
1351 obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
1352 modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
/* Bail out (error return elided) unless firmware allows both EPN fields. */
1355 if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
1356 !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
1357 mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
1362 obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
1363 MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
1364 MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
1365 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
1366 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
1368 /* General object fields set */
1369 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
1371 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1374 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1375 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1376 struct mlx5e_macsec_aso_in *in)
1378 struct mlx5_aso_ctrl_param param = {};
1380 param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1381 param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1382 param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1383 if (in->mode == MLX5_MACSEC_EPN) {
1384 param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1385 param.bitwise_data = BIT_ULL(54);
1386 param.data_mask = param.bitwise_data;
1388 macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, ¶m);
/* Post a single ASO WQE that re-arms the event for the MACsec object
 * named by @in->obj_id, then poll its completion synchronously.
 *
 * aso_lock serializes access to the shared ASO SQ/CQ (see the
 * "Protects macsec ASO" comment on struct mlx5e_macsec_aso).
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
                                    struct mlx5e_macsec_aso_in *in)
        struct mlx5e_macsec_aso *aso;
        struct mlx5_aso_wqe *aso_wqe;
        struct mlx5_aso *maso;

        mutex_lock(&aso->aso_lock);
        aso_wqe = mlx5_aso_get_wqe(maso);
        mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
                           MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
        /* Arm parameters come from @in (see macsec_aso_build_ctrl) */
        macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
        mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
        /* Wait for the completion of the WQE just posted */
        err = mlx5_aso_poll_cq(maso, false);
        mutex_unlock(&aso->aso_lock);
/* Query the MACsec ASO context of object @in->obj_id.
 *
 * Posts an ASO WQE and, after its completion, reads the EPN arm bit and
 * the mode parameter back from the UMR-mapped context buffer into @out.
 * Serialized by aso_lock, which also protects the shared umr->ctx buffer.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
                            struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
        struct mlx5e_macsec_aso *aso;
        struct mlx5_aso_wqe *aso_wqe;
        struct mlx5_aso *maso;

        mutex_lock(&aso->aso_lock);

        aso_wqe = mlx5_aso_get_wqe(maso);
        mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
                           MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
        /* NULL ctrl param: presumably a plain read-back with no
         * condition/bitwise data — confirm in macsec_aso_build_wqe_ctrl_seg.
         */
        macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

        mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
        err = mlx5_aso_poll_cq(maso, false);

        /* HW has DMA'd the ASO context into the registered umr buffer */
        if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
                out->event_arm |= MLX5E_ASO_EPN_ARM;

        out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

        mutex_unlock(&aso->aso_lock);
/* Find an active Tx SA whose HW object id equals @obj_id, scanning every
 * MACsec device tracked by @macsec and all MACSEC_NUM_AN Tx SA slots.
 * NOTE(review): callers appear to hold macsec->lock while walking the
 * device list (see macsec_async_event) — confirm.
 */
static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
        const struct list_head *device_list;
        struct mlx5e_macsec_sa *macsec_sa;
        struct mlx5e_macsec_device *iter;

        device_list = &macsec->macsec_device_list_head;

        list_for_each_entry(iter, device_list, macsec_device_list_element) {
                /* One Tx SA slot per association number */
                for (i = 0; i < MACSEC_NUM_AN; ++i) {
                        macsec_sa = iter->tx_sa[i];
                        if (!macsec_sa || !macsec_sa->active)
                        if (macsec_sa->macsec_obj_id == obj_id)
/* Find an active Rx SA whose HW object id equals @obj_id.
 *
 * Same search as get_macsec_tx_sa_from_obj_id(), but with one extra
 * nesting level: each device keeps a list of Rx SCs, and each SC holds
 * MACSEC_NUM_AN Rx SA slots.
 */
static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
        const struct list_head *device_list, *sc_list;
        struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
        struct mlx5e_macsec_sa *macsec_sa;
        struct mlx5e_macsec_device *iter;

        device_list = &macsec->macsec_device_list_head;

        list_for_each_entry(iter, device_list, macsec_device_list_element) {
                sc_list = &iter->macsec_rx_sc_list_head;
                list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
                        for (i = 0; i < MACSEC_NUM_AN; ++i) {
                                macsec_sa = mlx5e_rx_sc->rx_sa[i];
                                if (!macsec_sa || !macsec_sa->active)
                                if (macsec_sa->macsec_obj_id == obj_id)
/* Advance the SA's software EPN state after the HW EPN event fired, push
 * the new epn_msb/overlap values into the MACsec offload object, and
 * re-arm the EPN event via the ASO.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
                              struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
        struct mlx5_macsec_obj_attrs attrs = {};
        struct mlx5e_macsec_aso_in in = {};

        /* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
         * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
         * esn_overlap to OLD (1).
         * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
         * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
         * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
         */
        if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
                sa->epn_state.epn_msb++;
                sa->epn_state.overlap = 0;
                sa->epn_state.overlap = 1;

        /* Propagate the updated EPN state to the HW object */
        macsec_build_accel_attrs(sa, &attrs);
        mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

        /* Re-set EPN arm event */
        in.mode = MLX5_MACSEC_EPN;
        macsec_aso_set_arm_event(mdev, macsec, &in);
/* Workqueue handler for a MACsec object-change event.
 *
 * Looks up the SA that owns the object id (Tx SAs first, then Rx SAs),
 * queries its ASO context, and if EPN is enabled and the EPN arm bit has
 * been cleared by HW (i.e. the event fired), updates the EPN state.
 * Runs under macsec->lock so the SA lists cannot change underneath.
 */
static void macsec_async_event(struct work_struct *work)
        struct mlx5e_macsec_async_work *async_work;
        struct mlx5e_macsec_aso_out out = {};
        struct mlx5e_macsec_aso_in in = {};
        struct mlx5e_macsec_sa *macsec_sa;
        struct mlx5e_macsec *macsec;
        struct mlx5_core_dev *mdev;

        async_work = container_of(work, struct mlx5e_macsec_async_work, work);
        macsec = async_work->macsec;
        mutex_lock(&macsec->lock);

        mdev = async_work->mdev;
        obj_id = async_work->obj_id;
        macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
        /* Not a Tx SA object — try the Rx SAs */
        macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
        mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
        goto out_async_work;

        /* Query MACsec ASO context */
        macsec_aso_query(mdev, macsec, &in, &out);

        /* EPN arm bit cleared means the EPN event fired and needs handling */
        if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
                macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

        mutex_unlock(&macsec->lock);
/* mlx5 notifier callback for OBJECT_CHANGE EQEs.
 *
 * Filters for MACsec general objects and defers the actual handling to
 * the ordered workqueue via macsec_async_event(). The GFP_ATOMIC
 * allocation suggests this runs in atomic (EQ) context — do not sleep.
 */
static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
        struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
        struct mlx5e_macsec_async_work *async_work;
        struct mlx5_eqe_obj_change *obj_change;
        struct mlx5_eqe *eqe = data;

        if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)

        obj_change = &eqe->data.obj_change;
        obj_type = be16_to_cpu(obj_change->obj_type);
        obj_id = be32_to_cpu(obj_change->obj_id);

        if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)

        async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);

        /* async_work is freed by the work handler */
        async_work->macsec = macsec;
        async_work->mdev = macsec->mdev;
        async_work->obj_id = obj_id;

        INIT_WORK(&async_work->work, macsec_async_event);

        WARN_ON(!queue_work(macsec->wq, &async_work->work));
/* Initialize the MACsec ASO machinery: allocate a PD, create the ASO
 * SQ/CQ pair, register the UMR buffer used to read back ASO contexts,
 * and init the serializing mutex. Error paths unwind in reverse order.
 */
static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
        struct mlx5_aso *maso;

        err = mlx5_core_alloc_pd(mdev, &aso->pdn);
                     "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",

        maso = mlx5_aso_create(mdev, aso->pdn);
                err = PTR_ERR(maso);

        err = mlx5e_macsec_aso_reg_mr(mdev, aso);

        mutex_init(&aso->aso_lock);

        /* Error unwind: destroy ASO, then release the PD */
        mlx5_aso_destroy(maso);

        mlx5_core_dealloc_pd(mdev, aso->pdn);
/* Tear down the ASO machinery in reverse order of mlx5e_macsec_aso_init():
 * deregister the UMR MR, destroy the ASO SQ/CQ, release the PD.
 */
static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
        mlx5e_macsec_aso_dereg_mr(mdev, aso);

        mlx5_aso_destroy(aso->maso);

        mlx5_core_dealloc_pd(mdev, aso->pdn);
/* Check every device capability required for MACsec full offload:
 * the MACsec general object type, DEK support, MACsec flow-table
 * decrypt/encrypt plus reformat actions on Rx and Tx, and at least one
 * AES-GCM key size (128 or 256) in each direction.
 */
bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
        if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
              MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))

        /* Crypto keys are carried by DEK objects */
        if (!MLX5_CAP_GEN(mdev, log_max_dek))

        if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))

        if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
            !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))

        if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
            !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))

        /* At least one GCM key size must be supported per direction */
        if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
            !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))

        if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
            !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
/* Fill @macsec_stats from the flow-steering counters; thin wrapper around
 * the macsec_fs layer.
 */
void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
        mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
/* Return a pointer to the cached MACsec statistics embedded in @macsec. */
struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
        return &macsec->stats;
/* Kernel MACsec offload callbacks (netdev->macsec_ops): add/update/delete
 * for Tx SAs, Rx SCs, Rx SAs, and SecYs.
 */
static const struct macsec_ops macsec_offload_ops = {
        .mdo_add_txsa = mlx5e_macsec_add_txsa,
        .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
        .mdo_del_txsa = mlx5e_macsec_del_txsa,
        .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
        .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
        .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
        .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
        .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
        .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
        .mdo_add_secy = mlx5e_macsec_add_secy,
        .mdo_upd_secy = mlx5e_macsec_upd_secy,
        .mdo_del_secy = mlx5e_macsec_del_secy,
/* Tx datapath hook: resolve the SA steering id from the skb's MACsec
 * metadata dst, keyed by SCI. On lookup failure the skb is freed
 * (dev_kfree_skb_any) rather than transmitted.
 */
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);

        dev_kfree_skb_any(skb);
/* Tx WQE hook: stamp the Ethernet segment's flow-table metadata with the
 * MACsec marker and the SA steering id (fs_id, shifted into its field) so
 * the HW steers the packet into the MACsec encrypt tables.
 */
void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
                                struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg)
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);

        eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
/* Rx datapath hook: extract the Rx SC handle (fs_id) from the CQE's
 * flow-table metadata, look up the SC in the xarray, and attach its
 * metadata dst to the skb so the MACsec stack can associate the packet
 * with the right SecY/SC.
 */
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
                                        struct sk_buff *skb,
                                        struct mlx5_cqe64 *cqe)
        struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
        u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_macsec_rx_sc *rx_sc;
        struct mlx5e_macsec *macsec;

        macsec = priv->macsec;

        fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);

        /* NOTE(review): xa_load() result is dereferenced without a NULL
         * check — presumably fs_id is guaranteed valid whenever the MACsec
         * metadata marker is present; confirm.
         */
        sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
        rx_sc = sc_xarray_element->rx_sc;

        /* Hold a ref for the skb; released when the skb's dst is dropped */
        dst_hold(&rx_sc->md_dst->dst);
        skb_dst_set(skb, &rx_sc->md_dst->dst);
/* Advertise MACsec offload on the netdev when the device supports it:
 * install the offload ops, set NETIF_F_HW_MACSEC, and keep the dst on
 * skbs (netif_keep_dst) since the datapath relies on the metadata dst.
 */
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
        struct net_device *netdev = priv->netdev;

        if (!mlx5e_is_macsec_device(priv->mdev))

        mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
        netdev->macsec_ops = &macsec_offload_ops;
        netdev->features |= NETIF_F_HW_MACSEC;
        netif_keep_dst(netdev);
/* Initialize MACsec offload state for a netdev.
 *
 * Allocates the mlx5e_macsec context and brings up, in order: the device
 * list and lock, the SCI hash table, the ASO machinery, the ordered
 * workqueue for async object-change events, the Rx SC xarray, the
 * flow-steering tables, and finally the EQE notifier. Error paths unwind
 * in reverse order of setup.
 *
 * Returns 0 on success (also when the device simply has no MACsec
 * support) or a negative errno on setup failure.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_macsec *macsec = NULL;
        struct mlx5e_macsec_fs *macsec_fs;

        if (!mlx5e_is_macsec_device(priv->mdev)) {
                mlx5_core_dbg(mdev, "Not a MACsec offload device\n");

        macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);

        INIT_LIST_HEAD(&macsec->macsec_device_list_head);
        mutex_init(&macsec->lock);

        /* SCI -> SA lookup used by the Tx datapath hooks */
        err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
                mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",

        err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
                mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);

        /* Ordered: async object-change events are handled one at a time */
        macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);

        /* XA_FLAGS_ALLOC1: fs_id 0 is never allocated, so 0 means "no SC" */
        xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

        priv->macsec = macsec;

        macsec->mdev = mdev;

        macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);

        macsec->macsec_fs = macsec_fs;

        /* Start receiving MACsec object-change EQEs */
        macsec->nb.notifier_call = macsec_obj_change_event;
        mlx5_notifier_register(mdev, &macsec->nb);

        mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

        /* Error unwind: reverse order of the setup above */
        destroy_workqueue(macsec->wq);

        mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);

        rhashtable_destroy(&macsec->sci_hash);

        priv->macsec = NULL;
1849 void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
1851 struct mlx5e_macsec *macsec = priv->macsec;
1852 struct mlx5_core_dev *mdev = priv->mdev;
1857 mlx5_notifier_unregister(mdev, &macsec->nb);
1858 mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
1859 destroy_workqueue(macsec->wq);
1860 mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
1861 rhashtable_destroy(&macsec->sci_hash);
1862 mutex_destroy(&macsec->lock);