/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>

#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"

enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
	MLX5_FPGA_IPSEC_CMD_COMPLETE,
};

struct mlx5_fpga_ipsec_cmd_context {
	struct mlx5_fpga_dma_buf buf;
	enum mlx5_fpga_ipsec_cmd_status status;
	struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
	int status_code;
	struct completion complete;
	struct mlx5_fpga_device *dev;
	struct list_head list; /* Item in pending_cmds */
	u8 command[];
};

struct mlx5_fpga_esp_xfrm;

struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head		hash;
	struct mlx5_ifc_fpga_ipsec_sa	hw_sa;
	u32				sa_handle;
	struct mlx5_core_dev		*dev;
	struct mlx5_fpga_esp_xfrm	*fpga_xfrm;
};

struct mlx5_fpga_esp_xfrm {
	unsigned int			num_rules;
	struct mlx5_fpga_ipsec_sa_ctx	*sa_ctx;
	struct mutex			lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm	accel_xfrm;
};

struct mlx5_fpga_ipsec_rule {
	struct rb_node			node;
	struct fs_fte			*fte;
	struct mlx5_fpga_ipsec_sa_ctx	*ctx;
};

static const struct rhashtable_params rhash_sa = {
	/* Keep the "cmd" field out of the key: its value
	 * is not constant during the lifetime of the key object.
	 */
	.key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
		   sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
		      sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
	struct mlx5_fpga_conn *conn;

	struct notifier_block	fs_notifier_ingress_bypass;
	struct notifier_block	fs_notifier_egress;

	/* Map hardware SA           -->  SA context
	 *     (mlx5_fpga_ipsec_sa)       (mlx5_fpga_ipsec_sa_ctx)
	 * We use this hash to avoid duplicating SAs in the fpga,
	 * which isn't allowed.
	 */
	struct rhashtable sa_hash;	/* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;

	/* Tree holding all rules for this fpga device
	 * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock; /* rules lock */

	struct ida halloc;
};

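/*
 * An FPGA is treated as an IPsec device only when the generic FPGA
 * capability is present and the loaded sandbox image identifies itself
 * as the Mellanox IPsec SBU via the vendor/product ID pair below.
 */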
bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
		return false;

	return true;
}

static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
					  struct mlx5_fpga_device *fdev,
					  struct mlx5_fpga_dma_buf *buf,
					  u8 status)
{
	struct mlx5_fpga_ipsec_cmd_context *context;

	if (status) {
		context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
				       buf);
		mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
			       status);
		context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
		complete(&context->complete);
	}
}

static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
	switch (syndrome) {
	case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
		return 0;
	case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
		return -EEXIST;
	case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
		return -EINVAL;
	case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
		return -EIO;
	}
	return -EIO;
}

static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}

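/*
 * Commands are sent asynchronously over the SBU connection. Responses carry
 * no command identifier, so ordering is the implicit key: each response is
 * matched to the oldest entry on pending_cmds (see mlx5_fpga_ipsec_recv()
 * above), which is why the send and the list insertion below happen under
 * a single spinlock critical section.
 */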
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context should be freed by the caller after completion. */
	return context;
}

static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
	unsigned long timeout =
		msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
	int res;

	res = wait_for_completion_timeout(&context->complete, timeout);
	if (!res) {
		mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
		return -ETIMEDOUT;
	}

	if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	return res;
}

static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
	if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
		return true;
	return false;
}

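/*
 * Push one SA command to the device and wait for its response. A v2 SADB
 * accepts the full mlx5_ifc_fpga_ipsec_sa; a v1 SADB only understands the
 * embedded ipsec_sa_v1 prefix, so only that many bytes are sent.
 */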
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}

u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	u32 ret = 0;

	if (mlx5_fpga_is_ipsec_device(mdev)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
		ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
	} else {
		return ret;
	}

	if (!fdev->ipsec)
		return ret;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
		ret |= MLX5_ACCEL_IPSEC_CAP_ESP;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
		ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
		ret |= MLX5_ACCEL_IPSEC_CAP_LSO;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
		ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
		ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
	}

	return ret;
}

static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->ipsec)
		return 0;

	return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			number_of_ipsec_counters);
}

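/*
 * Counters live in FPGA memory at the address advertised in the extended
 * capabilities. Each 64-bit counter is stored as two big-endian 32-bit
 * words, low word first, hence the unscrambling loop below.
 */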
static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
					 unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			      ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);
	return ret;
}

static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	kfree(context);
	return err;
}

static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
	u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
	u32 flags = 0;

	if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
		flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;

	return mlx5_fpga_ipsec_set_caps(mdev, flags);
}

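/*
 * Translate generic xfrm attributes into the FPGA SA layout: key material
 * (a 128-bit key is duplicated to fill the hardware key slot), salt and
 * sequence IV, ESN state, the SW SA handle, encryption mode and the
 * direction flags.
 */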
static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
			      const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			      struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;

	/* key */
	memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
	       aes_gcm->key_len / 8);
	/* Duplicate 128 bit key twice according to HW layout */
	if (aes_gcm->key_len == 128)
		memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
		       aes_gcm->aes_key, aes_gcm->key_len / 8);

	/* salt and seq_iv */
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
	       sizeof(aes_gcm->seq_iv));
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
	       sizeof(aes_gcm->salt));

	/* esn */
	if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags |=
				(xfrm_attrs->flags &
				 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
					MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
		hw_sa->esn = htonl(xfrm_attrs->esn);
	} else {
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_OVERLAP;
		hw_sa->esn = 0;
	}

	/* rx handle */
	hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);

	/* enc mode */
	switch (aes_gcm->key_len) {
	case 128:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
		break;
	case 256:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
		break;
	}

	/* flags */
	hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
			MLX5_FPGA_IPSEC_SA_SPI_EN |
			MLX5_FPGA_IPSEC_SA_IP_ESP;

	if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
	else
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}

static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}

static bool is_full_mask(const void *p, size_t len)
{
	WARN_ON(len % 4);

	return !memchr_inv(p, 0xff, len);
}

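/*
 * The FPGA SADB is looked up by exact {src IP, dst IP, SPI}, so offloadable
 * rules must match those fields with all-ones masks; presumably a narrower
 * (wildcard) match cannot be represented in the SADB, and is rejected here.
 */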
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}

static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}

static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act,
					   struct mlx5_flow_context *flow_context)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	    (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
		return false;

	return true;
}

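/*
 * Create (or reuse) the hardware SA for an accel_xfrm. The sa_hash
 * rhashtable de-duplicates identical hardware SAs across rules; for
 * decrypt SAs an IDA handle is also allocated, apparently so received
 * packets can be mapped back to their SA via the sw_sa_handle metadata.
 */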
static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
					   struct mlx5_accel_esp_xfrm *accel_xfrm,
					   const __be32 saddr[4], const __be32 daddr[4],
					   const __be32 spi, bool is_ipv6, u32 *sa_handle)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) {        /* multiple rules for same accel_xfrm */
		/* all rules must be with same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
		err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
		if (err < 0) {
			context = ERR_PTR(err);
			goto exists;
		}

		sa_ctx->sa_handle = err;
		if (sa_handle)
			*sa_handle = sa_ctx->sa_handle;
	}
	/* This is unbounded fpga_xfrm, try to add to hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bind a different accel_xfrm to an already existing
		 * sa_ctx. This is because we can't support multiple keymats
		 * for the same sa_ctx.
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bind accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);
	if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		ida_free(&fipsec->halloc, sa_ctx->sa_handle);
exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}

static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
				 struct fs_fte *fte,
				 bool is_egress)
{
	struct mlx5_accel_esp_xfrm *accel_xfrm;
	__be32 saddr[4], daddr[4], spi;
	struct mlx5_flow_group *fg;
	bool is_ipv6 = false;

	fs_get_obj(fg, fte->node.parent);
	/* validate */
	if (is_egress &&
	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
					    fg->mask.match_criteria_enable,
					    fg->mask.match_criteria,
					    fte->val,
					    &fte->action,
					    &fte->flow_context))
		return ERR_PTR(-EINVAL);
	else if (!mlx5_is_fpga_ipsec_rule(mdev,
					  fg->mask.match_criteria_enable,
					  fg->mask.match_criteria,
					  fte->val))
		return ERR_PTR(-EINVAL);

	/* get xfrm context */
	accel_xfrm =
		(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;

	/* IPs */
	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
				       fte->val)) {
		memcpy(&saddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       sizeof(saddr[3]));
		memcpy(&daddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       sizeof(daddr[3]));
	} else {
		memcpy(saddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       sizeof(saddr));
		memcpy(daddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       sizeof(daddr));
		is_ipv6 = true;
	}

	/* SPI */
	spi = MLX5_GET_BE(typeof(spi),
			  fte_match_param, fte->val,
			  misc_parameters.outer_esp_spi);

	/* create */
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
					     saddr, daddr,
					     spi, is_ipv6, NULL);
}

static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		WARN_ON(err);
		return;
	}

	/* Release the RX handle allocated in mlx5_fpga_ipsec_create_sa_ctx()
	 * (compare with "==" as in the create path; action is not a bitmask).
	 */
	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
	    MLX5_ACCEL_ESP_ACTION_DECRYPT)
		ida_free(&fipsec->halloc, sa_ctx->sa_handle);

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}

static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		kfree(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}

static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mlx5_fpga_ipsec_rule *rule =
				container_of(node, struct mlx5_fpga_ipsec_rule,
					     node);

		if (rule->fte < fte)
			node = node->rb_left;
		else if (rule->fte > fte)
			node = node->rb_right;
		else
			return rule;
	}
	return NULL;
}

static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}

static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}

static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}

static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}

static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}

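/*
 * Scratch space for modify_spec_mailbox()/restore_spec_mailbox(): the
 * ENCRYPT/DECRYPT action bits and esp_id are meaningful only to this FPGA
 * layer, and the SPI match may be unsupported by firmware, so they are
 * stripped from the FTE around each firmware command and restored after.
 */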
struct mailbox_mod {
	uintptr_t			saved_esp_id;
	u32				saved_action;
	u32				saved_outer_esp_spi_value;
};

static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}

static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}

static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
	return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}

static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_table *ft, u32 *in,
				 struct mlx5_flow_group *fg) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	struct mlx5_core_dev *dev = ns->dev;
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(ns, ft, in, fg);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(ns, ft, in, fg);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(ns, ft, in, fg);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);

	return ret;
}

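/*
 * FTE creation for an ESP rule: build the SA context first, track the rule
 * in the rb-tree, then program the FTE through the default command set with
 * the FPGA-only fields masked out via the mailbox_mod trick above.
 */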
static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*create_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(ns, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);

		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(ns, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}

static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*update_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	struct mlx5_core_dev *dev = ns->dev;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(ns, ft, fg, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(ns, ft, fg, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(ns, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(ns, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    struct mlx5_flow_group *fg)
{
	return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}

static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}

static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
					true);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     struct mlx5_flow_group *fg)
{
	return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}

static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}

static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
					false);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}

static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
		return &fpga_ipsec_ingress;
	case FS_FT_NIC_TX:
		return &fpga_ipsec_egress;
	default:
		WARN_ON(true);
		return NULL;
	}
}

static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	ida_init(&fdev->ipsec->halloc);

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}

static void destroy_rules_rb(struct rb_root *root)
{
	struct mlx5_fpga_ipsec_rule *r, *tmp;

	rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
		rb_erase(&r->node, root);
		mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
		kfree(r);
	}
}

static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	ida_destroy(&fdev->ipsec->halloc);
	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}

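/*
 * Build the ingress/egress command tables once at driver init: every hook
 * falls through to the default NIC RX/TX commands except the flow group
 * and FTE operations intercepted above.
 */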
void mlx5_fpga_ipsec_build_fs_cmds(void)
{
	/* ingress */
	fpga_ipsec_ingress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
	fpga_ipsec_ingress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
	fpga_ipsec_ingress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
	fpga_ipsec_ingress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_ingress;
	fpga_ipsec_ingress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
	fpga_ipsec_ingress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_ingress;
	fpga_ipsec_ingress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_ingress;
	fpga_ipsec_ingress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_ingress;
	fpga_ipsec_ingress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;

	/* egress */
	fpga_ipsec_egress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
	fpga_ipsec_egress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
	fpga_ipsec_egress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
	fpga_ipsec_egress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_egress;
	fpga_ipsec_egress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
	fpga_ipsec_egress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_egress;
	fpga_ipsec_egress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_egress;
	fpga_ipsec_egress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_egress;
	fpga_ipsec_egress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}

static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
				  const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	if (attrs->tfc_pad) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
		return -EOPNOTSUPP;
	}

	if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.iv_algo !=
	    MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.icv_len != 128) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.key_len != 128 &&
	    attrs->keymat.aes_gcm.key_len != 256) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EOPNOTSUPP;
	}

	if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
		       v2_command))) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}

static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);

	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}

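/*
 * Modifying a bound xfrm re-keys the SA in place: the old hw_sa is pulled
 * from the hash, rebuilt with the new attributes and re-inserted, then
 * MOD_SA_V2 is issued; on any failure the original hw_sa is restored.
 */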
static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
				     const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = xfrm->mdev;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
	struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;

	int err = 0;

	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
		return 0;

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return -EOPNOTSUPP;
	}

	/* Modify requires the v2 SADB command set (MOD_SA_V2 below) */
	if (!is_v2_sadb_supported(fipsec)) {
		mlx5_core_warn(mdev, "Modify esp is not supported\n");
		return -EOPNOTSUPP;
	}

	fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);

	mutex_lock(&fpga_xfrm->lock);

	if (!fpga_xfrm->sa_ctx)
		/* Unbounded xfrm, change only sw attrs */
		goto change_sw_xfrm_attrs;

	/* copy original hw sa */
	memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
	mutex_lock(&fipsec->sa_hash_lock);
	/* remove original hw sa from hash */
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
				       &fpga_xfrm->sa_ctx->hash, rhash_sa));
	/* update hw_sa with new xfrm attrs */
	mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
				      &fpga_xfrm->sa_ctx->hw_sa);
	/* try to insert new hw_sa to hash */
	err = rhashtable_insert_fast(&fipsec->sa_hash,
				     &fpga_xfrm->sa_ctx->hash, rhash_sa);
	if (err)
		goto rollback_sa;

	/* modify device with new hw_sa */
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
					   MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
	fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err)
		WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
rollback_sa:
	if (err) {
		/* return original hw_sa to hash */
		memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
		       sizeof(org_hw_sa));
		WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
	}
	mutex_unlock(&fipsec->sa_hash_lock);

change_sw_xfrm_attrs:
	if (!err)
		memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
	mutex_unlock(&fpga_xfrm->lock);
	return err;
}

static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
	.device_caps = mlx5_fpga_ipsec_device_caps,
	.counters_count = mlx5_fpga_ipsec_counters_count,
	.counters_read = mlx5_fpga_ipsec_counters_read,
	.create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
	.free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
	.init = mlx5_fpga_ipsec_init,
	.cleanup = mlx5_fpga_ipsec_cleanup,
	.esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
	.esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
	.esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
};

const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{
	if (!mlx5_fpga_is_ipsec_device(mdev))
		return NULL;

	return &fpga_ipsec_ops;
}