net/mlx5e: kTLS, Check ICOSQ WQE size in advance
authorMaxim Mikityanskiy <maximmi@nvidia.com>
Tue, 27 Sep 2022 20:36:05 +0000 (13:36 -0700)
committerJakub Kicinski <kuba@kernel.org>
Thu, 29 Sep 2022 02:36:37 +0000 (19:36 -0700)
Instead of WARNing in runtime when TLS offload WQEs posted to ICOSQ are
over the hardware limit, check their size before enabling TLS RX
offload, and block the offload if the condition fails. This also allows
dropping a u16 field from struct mlx5e_icosq.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

index fc595a8ef11f97b27ae287630ec6f916f0744acc..4778298f464529a2a2c99385385efc67bac9b940 100644 (file)
@@ -609,7 +609,6 @@ struct mlx5e_icosq {
        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
-       u16                        max_sq_wqebbs;
 
        struct work_struct         recover_work;
 } ____cacheline_aligned_in_smp;
index 8751e48e283d7dd03443976a87dd01fc90ab9ddb..f4f306bb8e6db588111127dbb7417a9f9b1a72b7 100644 (file)
@@ -448,13 +448,7 @@ static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
 
 static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
 {
-       u16 room = sq->reserved_room;
-
-       WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
-                 "wqe_size %u is greater than max SQ WQEBBs %u",
-                 wqe_size, sq->max_sq_wqebbs);
-
-       room += MLX5E_STOP_ROOM(wqe_size);
+       u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);
 
        return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
 }
index c0b77963cc7cd56f7e3da5b25c2d2275ce8b857b..da2184c94203654c7bf9d288c15f022844b76eb3 100644 (file)
@@ -92,6 +92,24 @@ static const struct tlsdev_ops mlx5e_ktls_ops = {
        .tls_dev_resync = mlx5e_ktls_resync,
 };
 
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+       u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+       if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+               return false;
+
+       /* Check the possibility to post the required ICOSQ WQEs. */
+       if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
+               return false;
+       if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
+               return false;
+       if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
+               return false;
+
+       return true;
+}
+
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
        struct net_device *netdev = priv->netdev;
index 299334b2f935b9ca5a560c8cf891bed50d9fe4ee..1c35045e41fb6d6def9f6f06481b10d4bdfe280d 100644 (file)
@@ -61,10 +61,7 @@ static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
        return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
 }
 
-static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
-       return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
-}
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
 
 struct mlx5e_tls_sw_stats {
        atomic64_t tx_tls_ctx;
index 9b48ae61f6922c85370b197425021f364ad2a53c..ab9bd250e7a98b614dec69cd59418fafabea001d 100644 (file)
@@ -1232,7 +1232,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
        sq->reserved_room = param->stop_room;
-       sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);