1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
11 #include "en_accel/macsec.h"
12 #include "en_accel/macsec_fs.h"
14 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
15 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
/* Events the MACsec ASO can arm; currently only the EPN (extended packet
 * number) event is visible here.
 */
17 enum mlx5_macsec_aso_event_arm {
18 MLX5E_ASO_EPN_ARM = BIT(0),
/* Offset into the ASO context of the remove-flow packet counter —
 * NOTE(review): exact units (DW vs bytes) not visible here, confirm
 * against the PRM macsec_aso layout.
 */
22 MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
/* Back-reference from an offloaded object handle to its owning instance. */
25 struct mlx5e_macsec_handle {
26 struct mlx5e_macsec *macsec;
/* Output of an ASO query (armed events, mode parameter). */
35 struct mlx5e_macsec_aso_out {
/* Input descriptor for an ASO WQE (object id, mode). */
40 struct mlx5e_macsec_aso_in {
/* Cached EPN state mirrored into the hardware object (msb/overlap). */
45 struct mlx5e_macsec_epn_state {
/* Deferred work item carrying the contexts needed outside IRQ context. */
51 struct mlx5e_macsec_async_work {
52 struct mlx5e_macsec *macsec;
53 struct mlx5_core_dev *mdev;
54 struct work_struct work;
/* Per-SA (Tx or Rx) software state: hash linkage for the sci lookup,
 * installed steering rule, and EPN bookkeeping.  Freed via RCU.
 */
58 struct mlx5e_macsec_sa {
67 struct rhash_head hash;
69 union mlx5e_macsec_rule *macsec_rule;
70 struct rcu_head rcu_head;
71 struct mlx5e_macsec_epn_state epn_state;
74 struct mlx5e_macsec_rx_sc;
/* xarray element mapping an allocated fs_id to its Rx SC. */
75 struct mlx5e_macsec_rx_sc_xarray_element {
77 struct mlx5e_macsec_rx_sc *rx_sc;
/* Rx secure channel: up to MACSEC_NUM_AN SAs, list linkage on the device,
 * its fs_id xarray element and the metadata dst handed to the datapath.
 */
80 struct mlx5e_macsec_rx_sc {
83 struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
84 struct list_head rx_sc_list_element;
85 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
86 struct metadata_dst *md_dst;
87 struct rcu_head rcu_head;
/* DMA-able buffer holding a macsec_aso context the device reads/writes. */
90 struct mlx5e_macsec_umr {
92 u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
/* ASO engine state: the mlx5_aso SQ/CQ wrapper, a lock serializing WQE
 * post/poll, and the UMR buffer above.
 */
96 struct mlx5e_macsec_aso {
98 struct mlx5_aso *maso;
99 /* Protects macsec ASO */
100 struct mutex aso_lock;
102 struct mlx5e_macsec_umr *umr;
/* Hashtable parameters for the Tx sci -> mlx5e_macsec_sa lookup: keyed on
 * the SA's sci field, linked through sa->hash.
 */
107 static const struct rhashtable_params rhash_sci = {
108 .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
109 .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
110 .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
111 .automatic_shrinking = true,
/* Per-net_device offload context: Tx SAs, list of Rx SCs, and a copy of
 * the device MAC address used to detect set_mac_address vs changelink.
 */
115 struct mlx5e_macsec_device {
116 const struct net_device *netdev;
117 struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
118 struct list_head macsec_rx_sc_list_head;
119 unsigned char *dev_addr;
120 struct list_head macsec_device_list_element;
/* Top-level MACsec offload state for an mlx5 core device. */
123 struct mlx5e_macsec {
124 struct list_head macsec_device_list_head;
126 struct mlx5e_macsec_fs *macsec_fs;
127 struct mutex lock; /* Protects mlx5e_macsec internal contexts */
129 /* Tx sci -> fs id mapping handling */
130 struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
132 /* Rx fs_id -> rx_sc mapping */
133 struct xarray sc_xarray;
135 struct mlx5_core_dev *mdev;
138 struct mlx5e_macsec_stats stats;
/* ASO engine used to query/modify hardware SA state (e.g. EPN). */
141 struct mlx5e_macsec_aso aso;
/* Event notifier and workqueue for deferred (e.g. EPN) handling. */
143 struct notifier_block nb;
144 struct workqueue_struct *wq;
/* Attributes used to create/modify a hardware MACsec object. */
147 struct mlx5_macsec_obj_attrs {
153 struct mlx5e_macsec_epn_state epn_state;
/* Raw fields of the ASO WQE control segment (conditions, offsets, masks);
 * packed into mlx5_wqe_aso_ctrl_seg by macsec_aso_build_wqe_ctrl_seg().
 */
160 struct mlx5_aso_ctrl_param {
162 u8 condition_0_operand;
163 u8 condition_1_operand;
164 u8 condition_0_offset;
165 u8 condition_1_offset;
167 u8 condition_operand;
168 u32 condition_0_data;
169 u32 condition_0_mask;
170 u32 condition_1_data;
171 u32 condition_1_mask;
/* Set up the memory the ASO engine accesses: allocate the UMR context,
 * DMA-map it bidirectionally on the PCI device, and create an mkey on
 * aso->pdn covering it.  On success the mapping is stored in aso->umr.
 */
176 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
178 struct mlx5e_macsec_umr *umr;
179 struct device *dma_device;
183 umr = kzalloc(sizeof(*umr), GFP_KERNEL);
189 dma_device = &mdev->pdev->dev;
190 dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
191 err = dma_mapping_error(dma_device, dma_addr);
193 mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
197 err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
199 mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
203 umr->dma_addr = dma_addr;
/* Error path: undo the DMA mapping if mkey creation failed. */
210 dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Tear down what mlx5e_macsec_aso_reg_mr() set up: destroy the mkey and
 * unmap the UMR context buffer.
 */
216 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
218 struct mlx5e_macsec_umr *umr = aso->umr;
220 mlx5_core_destroy_mkey(mdev, umr->mkey);
221 dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Program replay protection into the ASO context: no-op unless the SecY
 * requested replay_protect, otherwise set the window size and switch the
 * ASO mode to replay protection.
 */
225 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
229 if (!attrs->replay_protect)
/* NOTE(review): window_sz is derived from attrs->replay_window in lines
 * not visible here — confirm the supported window sizes.
 */
232 MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
233 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
/* Create the hardware MACsec offload general object for one SA: fill the
 * object and its embedded ASO context from @attrs (encryption, key id,
 * PD, next PN, EPN/salt/SSCI when EPN is enabled, replay protection),
 * execute the CREATE_GENERAL_OBJECT command and return the new object id
 * through @macsec_obj_id.
 */
238 static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
239 struct mlx5_macsec_obj_attrs *attrs,
243 u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
244 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
249 obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
250 aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
252 MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
253 MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
254 MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
255 MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
256 MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
/* EPN mode: arm the EPN event and program msb/overlap, SSCI and salt. */
259 if (attrs->epn_state.epn_enabled) {
263 MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
264 MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
265 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
266 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
267 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
268 salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
/* Copy the 12-byte salt as three u32 words in reversed word order. */
269 for (i = 0; i < 3 ; i++)
270 memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
272 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
275 MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
277 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
279 err = macsec_set_replay_protection(attrs, aso_ctx);
284 /* general object fields set */
285 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
286 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
288 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
291 "MACsec offload: Failed to create MACsec object (err = %d)\n",
296 *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
/* Destroy a hardware MACsec general object by id.  Failure of the
 * firmware command is intentionally ignored (destroy path, best effort).
 */
301 static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
303 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
304 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
306 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
307 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
308 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
310 mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Undo the offload of one SA: for Tx, first unhash it from sci_hash so
 * datapath lookups stop seeing it, then (if a steering rule is installed)
 * remove the rule, destroy the hardware object and clear macsec_rule.
 */
313 static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
314 struct mlx5e_macsec_sa *sa,
317 int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
318 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
320 if ((is_tx) && sa->fs_id) {
321 /* Make sure ongoing datapath readers sees a valid SA */
322 rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
/* No rule installed means the SA was never (or already un-) offloaded. */
326 if (!sa->macsec_rule)
329 mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
330 mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
331 sa->macsec_rule = NULL;
/* Offload one SA (Tx or Rx): create the hardware MACsec object from the
 * SA's state, install the matching steering rule, and (for Tx — the
 * rhashtable insert below keys on sa->sci) publish the SA in sci_hash.
 */
334 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
335 struct mlx5e_macsec_sa *sa,
339 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
340 struct mlx5e_macsec *macsec = priv->macsec;
341 struct mlx5_macsec_rule_attrs rule_attrs;
342 struct mlx5_core_dev *mdev = priv->mdev;
343 struct mlx5_macsec_obj_attrs obj_attrs;
344 union mlx5e_macsec_rule *macsec_rule;
345 struct macsec_key *key;
348 obj_attrs.next_pn = sa->next_pn;
349 obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
350 obj_attrs.enc_key_id = sa->enc_key_id;
351 obj_attrs.encrypt = encrypt;
352 obj_attrs.aso_pdn = macsec->aso.pdn;
353 obj_attrs.epn_state = sa->epn_state;
355 key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
357 if (sa->epn_state.epn_enabled) {
358 obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
359 cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
361 memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
364 obj_attrs.replay_window = ctx->secy->replay_window;
365 obj_attrs.replay_protect = ctx->secy->replay_protect;
367 err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
371 rule_attrs.macsec_obj_id = sa->macsec_obj_id;
372 rule_attrs.sci = sa->sci;
373 rule_attrs.assoc_num = sa->assoc_num;
374 rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
375 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
377 macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
380 goto destroy_macsec_object;
383 sa->macsec_rule = macsec_rule;
386 err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
388 goto destroy_macsec_object_and_rule;
/* NOTE(review): cleanup_sa() already destroys the hardware object; if
 * control can fall through from this label into destroy_macsec_object
 * (intervening lines not visible here), the object would be destroyed
 * twice — confirm against the full function.
 */
393 destroy_macsec_object_and_rule:
394 mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
395 destroy_macsec_object:
396 mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
/* Find an Rx SC by SCI in a device's Rx SC list (RCU-safe iteration);
 * returns the matching entry or NULL.
 */
401 static struct mlx5e_macsec_rx_sc *
402 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
404 struct mlx5e_macsec_rx_sc *iter;
406 list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
407 if (iter->sci == sci)
/* Flip an Rx SA between active and inactive: no-op when unchanged;
 * deactivation tears the offload down via cleanup_sa, activation
 * re-offloads via init_sa (reverting rx_sa->active on failure).
 */
414 static int macsec_rx_sa_active_update(struct macsec_context *ctx,
415 struct mlx5e_macsec_sa *rx_sa,
418 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
419 struct mlx5e_macsec *macsec = priv->macsec;
422 if (rx_sa->active == active)
425 rx_sa->active = active;
427 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
/* encrypt=true, is_tx=false: Rx decrypt offload. */
431 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
433 rx_sa->active = false;
/* Gate offload on the SecY features this driver supports: strict frame
 * validation, the default ICV length, and protect_frames enabled.
 * Returns false (with a netdev error log) when any check fails.
 */
438 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
440 const struct net_device *netdev = ctx->netdev;
441 const struct macsec_secy *secy = ctx->secy;
443 if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
445 "MACsec offload is supported only when validate_frame is in strict mode\n");
449 if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
450 netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
451 MACSEC_DEFAULT_ICV_LEN);
455 if (!secy->protect_frames) {
457 "MACsec offload is supported only when protect_frames is set\n");
/* Look up the per-device offload context whose netdev matches the SecY's
 * netdev; returns NULL when this SecY has no context yet.
 */
464 static struct mlx5e_macsec_device *
465 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
466 const struct macsec_context *ctx)
468 struct mlx5e_macsec_device *iter;
469 const struct list_head *list;
471 list = &macsec->macsec_device_list_head;
472 list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
473 if (iter->netdev == ctx->secy->netdev)
/* Cache the key salt and EPN state on the SA: epn_msb is the upper half
 * of the next PN; overlap flags whether the lower half has crossed the
 * mid-point of its 32-bit range.
 */
480 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
481 const pn_t *next_pn_halves)
483 struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
485 sa->salt = key->salt;
486 epn_state->epn_enabled = 1;
487 epn_state->epn_msb = next_pn_halves->upper;
488 epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
/* .mdo_add_txsa: allocate software state for a new Tx SA, create its
 * encryption key, and offload it immediately only when the SecY is
 * operational and this SA is the active encoding SA.
 * All handlers in this file serialize on macsec->lock.
 */
491 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
493 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
494 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
495 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
496 const struct macsec_secy *secy = ctx->secy;
497 struct mlx5e_macsec_device *macsec_device;
498 struct mlx5_core_dev *mdev = priv->mdev;
499 u8 assoc_num = ctx->sa.assoc_num;
500 struct mlx5e_macsec_sa *tx_sa;
501 struct mlx5e_macsec *macsec;
504 mutex_lock(&priv->macsec->lock);
506 macsec = priv->macsec;
507 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
508 if (!macsec_device) {
509 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
514 if (macsec_device->tx_sa[assoc_num]) {
515 netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
520 tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
526 tx_sa->active = ctx_tx_sa->active;
527 tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
528 tx_sa->sci = secy->sci;
529 tx_sa->assoc_num = assoc_num;
532 update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
534 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
535 MLX5_ACCEL_OBJ_MACSEC_KEY,
540 macsec_device->tx_sa[assoc_num] = tx_sa;
/* Defer offload unless this SA is the operational encoding SA. */
541 if (!secy->operational ||
542 assoc_num != tx_sc->encoding_sa ||
546 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
548 goto destroy_encryption_key;
550 mutex_unlock(&macsec->lock);
554 destroy_encryption_key:
555 macsec_device->tx_sa[assoc_num] = NULL;
556 mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
560 mutex_unlock(&macsec->lock);
/* .mdo_upd_txsa: only the active flag may change (PN updates are
 * rejected).  When the SA is the encoding SA, toggling active offloads
 * or tears down the hardware state accordingly.
 */
565 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
567 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
568 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
569 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
570 struct mlx5e_macsec_device *macsec_device;
571 u8 assoc_num = ctx->sa.assoc_num;
572 struct mlx5e_macsec_sa *tx_sa;
573 struct mlx5e_macsec *macsec;
574 struct net_device *netdev;
577 mutex_lock(&priv->macsec->lock);
579 macsec = priv->macsec;
580 netdev = ctx->netdev;
581 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
582 if (!macsec_device) {
583 netdev_err(netdev, "MACsec offload: Failed to find device context\n");
588 tx_sa = macsec_device->tx_sa[assoc_num];
590 netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
/* Changing the PN of an offloaded Tx SA is not supported. */
595 if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
596 netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
602 if (tx_sa->active == ctx_tx_sa->active)
/* Only the encoding SA has hardware state to add/remove. */
605 if (tx_sa->assoc_num != tx_sc->encoding_sa)
608 if (ctx_tx_sa->active) {
609 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
613 if (!tx_sa->macsec_rule) {
618 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
621 tx_sa->active = ctx_tx_sa->active;
623 mutex_unlock(&macsec->lock);
/* .mdo_del_txsa: tear down the SA's offload state, destroy its
 * encryption key and free the software SA.
 */
628 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
630 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
631 struct mlx5e_macsec_device *macsec_device;
632 u8 assoc_num = ctx->sa.assoc_num;
633 struct mlx5e_macsec_sa *tx_sa;
634 struct mlx5e_macsec *macsec;
637 mutex_lock(&priv->macsec->lock);
638 macsec = priv->macsec;
639 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
640 if (!macsec_device) {
641 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
646 tx_sa = macsec_device->tx_sa[assoc_num];
648 netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
653 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
654 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
656 macsec_device->tx_sa[assoc_num] = NULL;
659 mutex_unlock(&macsec->lock);
/* Datapath helper: translate a Tx SCI to its steering fs_id via the sci
 * hashtable.  NOTE(review): RCU read protection is expected around the
 * lookup — the locking lines are not visible here, confirm.
 */
664 static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
666 struct mlx5e_macsec_sa *macsec_sa;
670 macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
672 fs_id = macsec_sa->fs_id;
/* .mdo_add_rxsc: create software state for a new Rx SC — allocate it, an
 * fs_id from sc_xarray (range [1, MLX5_MACEC_RX_FS_ID_MAX]), and a
 * MACsec metadata dst carrying the SCI for the Rx datapath — then link
 * it on the device's Rx SC list.
 */
678 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
680 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
681 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
682 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
683 struct mlx5e_macsec_device *macsec_device;
684 struct mlx5e_macsec_rx_sc *rx_sc;
685 struct list_head *rx_sc_list;
686 struct mlx5e_macsec *macsec;
689 mutex_lock(&priv->macsec->lock);
690 macsec = priv->macsec;
691 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
692 if (!macsec_device) {
693 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
698 rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
699 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
701 netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
707 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
713 sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
714 if (!sc_xarray_element) {
719 sc_xarray_element->rx_sc = rx_sc;
/* fs_id 0 is reserved; allocation fails once the id space is exhausted. */
720 err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
721 XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
724 netdev_err(ctx->netdev,
725 "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
726 MLX5_MACEC_RX_FS_ID_MAX);
727 goto destroy_sc_xarray_elemenet;
730 rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
731 if (!rx_sc->md_dst) {
736 rx_sc->sci = ctx_rx_sc->sci;
737 rx_sc->active = ctx_rx_sc->active;
738 list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
740 rx_sc->sc_xarray_element = sc_xarray_element;
741 rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
742 mutex_unlock(&macsec->lock);
/* Error unwind: release the fs_id, then the xarray element. */
747 xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
748 destroy_sc_xarray_elemenet:
749 kfree(sc_xarray_element);
754 mutex_unlock(&macsec->lock);
/* .mdo_upd_rxsc: propagate a change of the SC's active flag to each of
 * its SAs — an SA is effectively active only when both it and its SC
 * are active.
 */
759 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
761 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
762 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
763 struct mlx5e_macsec_device *macsec_device;
764 struct mlx5e_macsec_rx_sc *rx_sc;
765 struct mlx5e_macsec_sa *rx_sa;
766 struct mlx5e_macsec *macsec;
767 struct list_head *list;
771 mutex_lock(&priv->macsec->lock);
773 macsec = priv->macsec;
774 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
775 if (!macsec_device) {
776 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
781 list = &macsec_device->macsec_rx_sc_list_head;
782 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
788 if (rx_sc->active == ctx_rx_sc->active)
791 rx_sc->active = ctx_rx_sc->active;
792 for (i = 0; i < MACSEC_NUM_AN; ++i) {
793 rx_sa = rx_sc->rx_sa[i];
797 err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
803 mutex_unlock(&macsec->lock);
/* Free an Rx SC and everything hanging off it: tear down and free each
 * SA (offload state + encryption key), then unlink the SC and release
 * its fs_id, metadata dst and xarray element.
 */
808 static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
810 struct mlx5e_macsec_sa *rx_sa;
813 for (i = 0; i < MACSEC_NUM_AN; ++i) {
814 rx_sa = rx_sc->rx_sa[i];
818 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
819 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
822 rx_sc->rx_sa[i] = NULL;
825 /* At this point the relevant MACsec offload Rx rule already removed at
826 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
827 * Rx related data propagating using xa_erase which uses rcu to sync,
828 * once fs_id is erased then this rx_sc is hidden from datapath.
830 list_del_rcu(&rx_sc->rx_sc_list_element);
831 xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
832 metadata_dst_free(rx_sc->md_dst);
833 kfree(rx_sc->sc_xarray_element);
/* .mdo_del_rxsc: find the SC by ctx->rx_sc->sci and delete it via
 * macsec_del_rxsc_ctx().
 */
837 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
839 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
840 struct mlx5e_macsec_device *macsec_device;
841 struct mlx5e_macsec_rx_sc *rx_sc;
842 struct mlx5e_macsec *macsec;
843 struct list_head *list;
846 mutex_lock(&priv->macsec->lock);
848 macsec = priv->macsec;
849 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
850 if (!macsec_device) {
851 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
856 list = &macsec_device->macsec_rx_sc_list_head;
857 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
859 netdev_err(ctx->netdev,
860 "MACsec offload rx_sc sci %lld doesn't exist\n",
/* NOTE(review): this log dereferences ctx->sa.rx_sa in an rx_sc handler;
 * ctx->rx_sc->sci would be the safer source — confirm ctx->sa is
 * populated on this path.
 */
861 ctx->sa.rx_sa->sc->sci);
866 macsec_del_rxsc_ctx(macsec, rx_sc);
868 mutex_unlock(&macsec->lock);
/* .mdo_add_rxsa: allocate software state for a new Rx SA on an existing
 * SC, inherit the SC's fs_id, create the encryption key, and offload the
 * SA (decrypt rule) when appropriate.
 */
873 static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
875 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
876 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
877 struct mlx5e_macsec_device *macsec_device;
878 struct mlx5_core_dev *mdev = priv->mdev;
879 u8 assoc_num = ctx->sa.assoc_num;
880 struct mlx5e_macsec_rx_sc *rx_sc;
881 sci_t sci = ctx_rx_sa->sc->sci;
882 struct mlx5e_macsec_sa *rx_sa;
883 struct mlx5e_macsec *macsec;
884 struct list_head *list;
887 mutex_lock(&priv->macsec->lock);
889 macsec = priv->macsec;
890 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
891 if (!macsec_device) {
892 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
897 list = &macsec_device->macsec_rx_sc_list_head;
898 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
900 netdev_err(ctx->netdev,
901 "MACsec offload rx_sc sci %lld doesn't exist\n",
902 ctx->sa.rx_sa->sc->sci);
907 if (rx_sc->rx_sa[assoc_num]) {
908 netdev_err(ctx->netdev,
909 "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
915 rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
921 rx_sa->active = ctx_rx_sa->active;
922 rx_sa->next_pn = ctx_rx_sa->next_pn;
924 rx_sa->assoc_num = assoc_num;
/* Rx SAs share the SC's fs_id (Rx steering is keyed per SC). */
925 rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
928 update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
930 err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
931 MLX5_ACCEL_OBJ_MACSEC_KEY,
936 rx_sc->rx_sa[assoc_num] = rx_sa;
940 //TODO - add support for both authentication and encryption flows
941 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
943 goto destroy_encryption_key;
947 destroy_encryption_key:
948 rx_sc->rx_sa[assoc_num] = NULL;
949 mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
953 mutex_unlock(&macsec->lock);
/* .mdo_upd_rxsa: like the Tx variant, only the active flag may change
 * (PN updates are rejected); delegates the toggle to
 * macsec_rx_sa_active_update().
 */
958 static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
960 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
961 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
962 struct mlx5e_macsec_device *macsec_device;
963 u8 assoc_num = ctx->sa.assoc_num;
964 struct mlx5e_macsec_rx_sc *rx_sc;
965 sci_t sci = ctx_rx_sa->sc->sci;
966 struct mlx5e_macsec_sa *rx_sa;
967 struct mlx5e_macsec *macsec;
968 struct list_head *list;
971 mutex_lock(&priv->macsec->lock);
973 macsec = priv->macsec;
974 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
975 if (!macsec_device) {
976 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
981 list = &macsec_device->macsec_rx_sc_list_head;
982 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
984 netdev_err(ctx->netdev,
985 "MACsec offload rx_sc sci %lld doesn't exist\n",
986 ctx->sa.rx_sa->sc->sci);
991 rx_sa = rx_sc->rx_sa[assoc_num];
993 netdev_err(ctx->netdev,
994 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
/* Changing the PN of an offloaded Rx SA is not supported. */
1000 if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
1001 netdev_err(ctx->netdev,
1002 "MACsec offload update RX sa %d PN isn't supported\n",
1008 err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
1010 mutex_unlock(&macsec->lock);
/* .mdo_del_rxsa: tear down one Rx SA's offload state and encryption key,
 * then free it and clear its slot on the SC.
 */
1015 static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
1017 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1018 struct mlx5e_macsec_device *macsec_device;
1019 sci_t sci = ctx->sa.rx_sa->sc->sci;
1020 struct mlx5e_macsec_rx_sc *rx_sc;
1021 u8 assoc_num = ctx->sa.assoc_num;
1022 struct mlx5e_macsec_sa *rx_sa;
1023 struct mlx5e_macsec *macsec;
1024 struct list_head *list;
1027 mutex_lock(&priv->macsec->lock);
1029 macsec = priv->macsec;
1030 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1031 if (!macsec_device) {
1032 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1037 list = &macsec_device->macsec_rx_sc_list_head;
1038 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1040 netdev_err(ctx->netdev,
1041 "MACsec offload rx_sc sci %lld doesn't exist\n",
1042 ctx->sa.rx_sa->sc->sci);
1047 rx_sa = rx_sc->rx_sa[assoc_num];
1049 netdev_err(ctx->netdev,
1050 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1056 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
1057 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
1059 rx_sc->rx_sa[assoc_num] = NULL;
1062 mutex_unlock(&macsec->lock);
/* .mdo_add_secy: validate the SecY's features, enforce the supported
 * device count, then allocate a per-device context (with a copy of the
 * MAC address for later change detection) and link it on the global
 * device list.
 */
1067 static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
1069 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1070 const struct net_device *dev = ctx->secy->netdev;
1071 const struct net_device *netdev = ctx->netdev;
1072 struct mlx5e_macsec_device *macsec_device;
1073 struct mlx5e_macsec *macsec;
1076 if (!mlx5e_macsec_secy_features_validate(ctx))
1079 mutex_lock(&priv->macsec->lock);
1080 macsec = priv->macsec;
1081 if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
1082 netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
1086 if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
1087 netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
1088 MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
1093 macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
1094 if (!macsec_device) {
1099 macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
1100 if (!macsec_device->dev_addr) {
1101 kfree(macsec_device);
1106 macsec_device->netdev = dev;
1108 INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
1109 list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
1111 ++macsec->num_of_devices;
1113 mutex_unlock(&macsec->lock);
/* MAC address changed: re-create the Rx offload state, since Rx steering
 * matches on the destination MAC.  First remove every installed Rx rule,
 * then re-offload the active SAs, and finally record the new address.
 */
1118 static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
1119 struct mlx5e_macsec_device *macsec_device)
1121 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1122 const struct net_device *dev = ctx->secy->netdev;
1123 struct mlx5e_macsec *macsec = priv->macsec;
1124 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1125 struct mlx5e_macsec_sa *rx_sa;
1126 struct list_head *list;
1130 list = &macsec_device->macsec_rx_sc_list_head;
1131 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1132 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1133 rx_sa = rx_sc->rx_sa[i];
1134 if (!rx_sa || !rx_sa->macsec_rule)
1137 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
/* Second pass: re-install offload for SAs that are still active. */
1141 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1142 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1143 rx_sa = rx_sc->rx_sa[i];
1147 if (rx_sa->active) {
1148 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
1155 memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
1160 /* this function is called from 2 macsec ops functions:
1161 * macsec_set_mac_address – MAC address was changed, therefore we need to destroy
1162 * and create new Tx contexts(macsec object + steering).
1163 * macsec_changelink – in this case the tx SC or SecY may be changed, therefore need to
1164 * destroy Tx and Rx contexts(macsec object + steering)
1166 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1168 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1169 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1170 const struct net_device *dev = ctx->secy->netdev;
1171 struct mlx5e_macsec_device *macsec_device;
1172 struct mlx5e_macsec_sa *tx_sa;
1173 struct mlx5e_macsec *macsec;
1176 if (!mlx5e_macsec_secy_features_validate(ctx))
1179 mutex_lock(&priv->macsec->lock);
1181 macsec = priv->macsec;
1182 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1183 if (!macsec_device) {
1184 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
/* NOTE(review): the comment below says "hasn't changed" but memcmp()==0
 * means the stored and current addresses are EQUAL — i.e. this branch
 * runs when the address did NOT change (macsec_changelink); confirm the
 * intended condition against the full function.
 */
1189 /* if the dev_addr hasn't changed, it means the callback is from macsec_changelink */
1190 if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1191 err = macsec_upd_secy_hw_address(ctx, macsec_device);
/* Tear down all Tx offload state, then re-offload only the active
 * encoding SA with the (possibly new) encrypt setting.
 */
1196 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1197 tx_sa = macsec_device->tx_sa[i];
1201 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1204 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1205 tx_sa = macsec_device->tx_sa[i];
1209 if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1210 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
1217 mutex_unlock(&macsec->lock);
/* .mdo_del_secy: full teardown of a device context — every Tx SA (offload
 * state + key), every Rx SC via macsec_del_rxsc_ctx(), the saved MAC
 * address copy, and finally the context itself.
 */
1222 static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
1224 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1225 struct mlx5e_macsec_device *macsec_device;
1226 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1227 struct mlx5e_macsec_sa *tx_sa;
1228 struct mlx5e_macsec *macsec;
1229 struct list_head *list;
1233 mutex_lock(&priv->macsec->lock);
1234 macsec = priv->macsec;
1235 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1236 if (!macsec_device) {
1237 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1243 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1244 tx_sa = macsec_device->tx_sa[i];
1248 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1249 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
1251 macsec_device->tx_sa[i] = NULL;
1254 list = &macsec_device->macsec_rx_sc_list_head;
1255 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
1256 macsec_del_rxsc_ctx(macsec, rx_sc);
1258 kfree(macsec_device->dev_addr);
1259 macsec_device->dev_addr = NULL;
1261 list_del_rcu(&macsec_device->macsec_device_list_element);
1262 --macsec->num_of_devices;
1263 kfree(macsec_device);
1266 mutex_unlock(&macsec->lock);
/* Copy the SA's cached EPN state into object-modify attributes. */
1271 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1272 struct mlx5_macsec_obj_attrs *attrs)
1274 attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1275 attrs->epn_state.overlap = sa->epn_state.overlap;
/* Fill an ASO WQE control segment.  When a UMR buffer is mapped, point
 * the WQE at it (low address bits carry ASO_CTRL_READ_EN).  @param is
 * optional: when given, its condition/mask/data fields are packed into
 * the segment's bit-fields; when NULL the defaulted lines below apply.
 */
1278 static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
1279 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1280 struct mlx5_aso_ctrl_param *param)
1282 memset(aso_ctrl, 0, sizeof(*aso_ctrl));
1283 if (macsec_aso->umr->dma_addr) {
1284 aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
1285 aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
1286 aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
/* Pack the sub-byte fields: operands/offsets share bytes, hence shifts. */
1292 aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
1293 aso_ctrl->condition_1_0_operand = param->condition_1_operand |
1294 param->condition_0_operand << 4;
1295 aso_ctrl->condition_1_0_offset = param->condition_1_offset |
1296 param->condition_0_offset << 4;
1297 aso_ctrl->data_offset_condition_operand = param->data_offset |
1298 param->condition_operand << 6;
1299 aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
1300 aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
1301 aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
1302 aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
1303 aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
1304 aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
/* Modify the EPN fields (epn_msb, epn_overlap) of an existing hardware
 * MACsec object: first query the object to verify both fields are
 * modifiable, then issue MODIFY_GENERAL_OBJECT with the new values.
 */
1307 static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
1310 u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
1311 u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
1312 u64 modify_field_select = 0;
1316 /* General object fields set */
1317 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1318 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
1319 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
1320 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1322 mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
1327 obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
1328 modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
/* Bail out (debug only) when firmware does not allow modifying EPN. */
1331 if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
1332 !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
1333 mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
1338 obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
1339 MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
1340 MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
1341 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
1342 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
1344 /* General object fields set */
1345 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
1347 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Build the control-segment parameters for an arm-event WQE: bitwise
 * 64-bit data-mask mode with always-true conditions; in EPN mode, set
 * the single bit (BIT 54 of the 64-bit word at the remove-flow packet
 * counter offset) that re-arms the EPN event.
 */
1350 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1351 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1352 struct mlx5e_macsec_aso_in *in)
1354 struct mlx5_aso_ctrl_param param = {};
1356 param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1357 param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1358 param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1359 if (in->mode == MLX5_MACSEC_EPN) {
1360 param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1361 param.bitwise_data = BIT_ULL(54);
1362 param.data_mask = param.bitwise_data;
1364 macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
1367 static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1368 struct mlx5e_macsec_aso_in *in)
1370 struct mlx5e_macsec_aso *aso;
1371 struct mlx5_aso_wqe *aso_wqe;
1372 struct mlx5_aso *maso;
1378 mutex_lock(&aso->aso_lock);
1379 aso_wqe = mlx5_aso_get_wqe(maso);
1380 mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1381 MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
1382 macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
1383 mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
1384 err = mlx5_aso_poll_cq(maso, false);
1385 mutex_unlock(&aso->aso_lock);
1390 static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1391 struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
1393 struct mlx5e_macsec_aso *aso;
1394 struct mlx5_aso_wqe *aso_wqe;
1395 struct mlx5_aso *maso;
1401 mutex_lock(&aso->aso_lock);
1403 aso_wqe = mlx5_aso_get_wqe(maso);
1404 mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1405 MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
1406 macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
1408 mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
1409 err = mlx5_aso_poll_cq(maso, false);
1413 if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
1414 out->event_arm |= MLX5E_ASO_EPN_ARM;
1416 out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
1419 mutex_unlock(&aso->aso_lock);
1423 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1426 const struct list_head *device_list;
1427 struct mlx5e_macsec_sa *macsec_sa;
1428 struct mlx5e_macsec_device *iter;
1431 device_list = &macsec->macsec_device_list_head;
1433 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1434 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1435 macsec_sa = iter->tx_sa[i];
1436 if (!macsec_sa || !macsec_sa->active)
1438 if (macsec_sa->macsec_obj_id == obj_id)
1446 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1449 const struct list_head *device_list, *sc_list;
1450 struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1451 struct mlx5e_macsec_sa *macsec_sa;
1452 struct mlx5e_macsec_device *iter;
1455 device_list = &macsec->macsec_device_list_head;
1457 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1458 sc_list = &iter->macsec_rx_sc_list_head;
1459 list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1460 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1461 macsec_sa = mlx5e_rx_sc->rx_sa[i];
1462 if (!macsec_sa || !macsec_sa->active)
1464 if (macsec_sa->macsec_obj_id == obj_id)
1473 static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
1474 struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
1476 struct mlx5_macsec_obj_attrs attrs = {};
1477 struct mlx5e_macsec_aso_in in = {};
1479 /* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
1480 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
1481 * esn_overlap to OLD (1).
1482 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
1483 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
1484 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
1487 if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
1488 sa->epn_state.epn_msb++;
1489 sa->epn_state.overlap = 0;
1491 sa->epn_state.overlap = 1;
1494 macsec_build_accel_attrs(sa, &attrs);
1495 mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
1497 /* Re-set EPN arm event */
1499 in.mode = MLX5_MACSEC_EPN;
1500 macsec_aso_set_arm_event(mdev, macsec, &in);
1503 static void macsec_async_event(struct work_struct *work)
1505 struct mlx5e_macsec_async_work *async_work;
1506 struct mlx5e_macsec_aso_out out = {};
1507 struct mlx5e_macsec_aso_in in = {};
1508 struct mlx5e_macsec_sa *macsec_sa;
1509 struct mlx5e_macsec *macsec;
1510 struct mlx5_core_dev *mdev;
1513 async_work = container_of(work, struct mlx5e_macsec_async_work, work);
1514 macsec = async_work->macsec;
1515 mutex_lock(&macsec->lock);
1517 mdev = async_work->mdev;
1518 obj_id = async_work->obj_id;
1519 macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
1521 macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
1523 mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
1524 goto out_async_work;
1528 /* Query MACsec ASO context */
1530 macsec_aso_query(mdev, macsec, &in, &out);
1533 if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
1534 macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
1538 mutex_unlock(&macsec->lock);
1541 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1543 struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1544 struct mlx5e_macsec_async_work *async_work;
1545 struct mlx5_eqe_obj_change *obj_change;
1546 struct mlx5_eqe *eqe = data;
1550 if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1553 obj_change = &eqe->data.obj_change;
1554 obj_type = be16_to_cpu(obj_change->obj_type);
1555 obj_id = be32_to_cpu(obj_change->obj_id);
1557 if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1560 async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1564 async_work->macsec = macsec;
1565 async_work->mdev = macsec->mdev;
1566 async_work->obj_id = obj_id;
1568 INIT_WORK(&async_work->work, macsec_async_event);
1570 WARN_ON(!queue_work(macsec->wq, &async_work->work));
1575 static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1577 struct mlx5_aso *maso;
1580 err = mlx5_core_alloc_pd(mdev, &aso->pdn);
1583 "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
1588 maso = mlx5_aso_create(mdev, aso->pdn);
1590 err = PTR_ERR(maso);
1594 err = mlx5e_macsec_aso_reg_mr(mdev, aso);
1598 mutex_init(&aso->aso_lock);
1605 mlx5_aso_destroy(maso);
1607 mlx5_core_dealloc_pd(mdev, aso->pdn);
1611 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1616 mlx5e_macsec_aso_dereg_mr(mdev, aso);
1618 mlx5_aso_destroy(aso->maso);
1620 mlx5_core_dealloc_pd(mdev, aso->pdn);
1623 bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
1625 if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
1626 MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
1629 if (!MLX5_CAP_GEN(mdev, log_max_dek))
1632 if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
1635 if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
1636 !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
1639 if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
1640 !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
1643 if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
1644 !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
1647 if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
1648 !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
1654 void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
1656 mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
1659 struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
1664 return &macsec->stats;
1667 static const struct macsec_ops macsec_offload_ops = {
1668 .mdo_add_txsa = mlx5e_macsec_add_txsa,
1669 .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
1670 .mdo_del_txsa = mlx5e_macsec_del_txsa,
1671 .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
1672 .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
1673 .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
1674 .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
1675 .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
1676 .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
1677 .mdo_add_secy = mlx5e_macsec_add_secy,
1678 .mdo_upd_secy = mlx5e_macsec_upd_secy,
1679 .mdo_del_secy = mlx5e_macsec_del_secy,
1682 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1684 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1687 fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1694 dev_kfree_skb_any(skb);
1698 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1699 struct sk_buff *skb,
1700 struct mlx5_wqe_eth_seg *eseg)
1702 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1705 fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1709 eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
1712 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1713 struct sk_buff *skb,
1714 struct mlx5_cqe64 *cqe)
1716 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1717 u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1718 struct mlx5e_priv *priv = netdev_priv(netdev);
1719 struct mlx5e_macsec_rx_sc *rx_sc;
1720 struct mlx5e_macsec *macsec;
1723 macsec = priv->macsec;
1727 fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1730 sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1731 rx_sc = sc_xarray_element->rx_sc;
1733 dst_hold(&rx_sc->md_dst->dst);
1734 skb_dst_set(skb, &rx_sc->md_dst->dst);
1740 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1742 struct net_device *netdev = priv->netdev;
1744 if (!mlx5e_is_macsec_device(priv->mdev))
1748 mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1749 netdev->macsec_ops = &macsec_offload_ops;
1750 netdev->features |= NETIF_F_HW_MACSEC;
1751 netif_keep_dst(netdev);
1754 int mlx5e_macsec_init(struct mlx5e_priv *priv)
1756 struct mlx5_core_dev *mdev = priv->mdev;
1757 struct mlx5e_macsec *macsec = NULL;
1758 struct mlx5e_macsec_fs *macsec_fs;
1761 if (!mlx5e_is_macsec_device(priv->mdev)) {
1762 mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
1766 macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
1770 INIT_LIST_HEAD(&macsec->macsec_device_list_head);
1771 mutex_init(&macsec->lock);
1773 err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
1775 mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
1780 err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
1782 mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
1786 macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
1792 xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
1794 priv->macsec = macsec;
1796 macsec->mdev = mdev;
1798 macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
1804 macsec->macsec_fs = macsec_fs;
1806 macsec->nb.notifier_call = macsec_obj_change_event;
1807 mlx5_notifier_register(mdev, &macsec->nb);
1809 mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
1814 destroy_workqueue(macsec->wq);
1816 mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
1818 rhashtable_destroy(&macsec->sci_hash);
1821 priv->macsec = NULL;
1825 void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
1827 struct mlx5e_macsec *macsec = priv->macsec;
1828 struct mlx5_core_dev *mdev = priv->mdev;
1833 mlx5_notifier_unregister(mdev, &macsec->nb);
1834 mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
1835 destroy_workqueue(macsec->wq);
1836 mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
1837 rhashtable_destroy(&macsec->sci_hash);
1838 mutex_destroy(&macsec->lock);