net/mlx5: Use generic definition for UMR KLM alignment
author     Tariq Toukan <tariqt@nvidia.com>
           Mon, 31 Oct 2022 12:24:02 +0000 (14:24 +0200)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Wed, 30 Nov 2022 05:09:44 +0000 (21:09 -0800)
MLX5_UMR_KLM_ALIGNMENT is expressed as a number of entries, while
MLX5_UMR_MTT_ALIGNMENT (generalized and renamed to
MLX5_UMR_FLEX_ALIGNMENT) is expressed in bytes. Mixing the two units is
misleading and confusing.

Replace the KLM definition with one derived from the generic byte-based
definition, so the entry-count alignment is computed as
MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm), mirroring its MTT
counterpart (see the sketch below).

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
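
A minimal sketch (not part of the patch), assuming struct mlx5_klm keeps its
16-byte layout (two __be32 fields plus one __be64) and MLX5_UMR_FLEX_ALIGNMENT
stays 0x40 bytes: the derived entry-count alignment then equals the old
hard-coded value of 4, so the replacement is behavior-preserving.

/* Compile-time sanity sketch, assuming sizeof(struct mlx5_klm) == 16 and
 * MLX5_UMR_FLEX_ALIGNMENT == 0x40: the derived entry-count alignment
 * matches the old hard-coded value.
 */
#include <linux/build_bug.h>
#include <linux/mlx5/device.h>

#define OLD_MLX5_UMR_KLM_ALIGNMENT 4	/* previous value, in entries */

static_assert(MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT == OLD_MLX5_UMR_KLM_ALIGNMENT,
	      "0x40 bytes / 16 bytes per KLM entry = 4 entries");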
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
include/linux/mlx5/device.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3cad59ac1b4863ae257aeae9cbe7cd521e3f5774..65790ff58a743abdf9f7c6e38ead3d1f70c26392 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -160,7 +160,7 @@ struct page_pool;
        (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
 
 #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
-       ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+       ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
 
 #define MLX5E_MAX_KLM_PER_WQE(mdev) \
        MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8d71736116e0b7ec66a1f9b7c6af812711338aa3..c8820ab221694aae3afb8472640ccdf5fe65b5b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -593,8 +593,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
        int headroom, i;
 
        headroom = rq->buff.headroom;
-       new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
-       entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+       new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+       entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
        wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
        pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
        umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
@@ -603,7 +603,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
        for (i = 0; i < entries; i++, index++) {
                dma_info = &shampo->info[index];
                if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
-                                        MLX5_UMR_KLM_ALIGNMENT))
+                                        MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
                        goto update_klm;
                header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
                        MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
@@ -668,8 +668,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
        if (!klm_entries)
                return 0;
 
-       klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
-       index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+       klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+       index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
        entries_before = shampo->hd_per_wq - index;
 
        if (unlikely(entries_before < klm_entries))
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a02f779f5c5b70d7e48c848693eaa40f5375a5b5..5fe5d198b57ade8b1ec02435f63da2fa0e17aebd 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -290,9 +290,9 @@ enum {
        MLX5_UMR_INLINE                 = (1 << 7),
 };
 
-#define MLX5_UMR_KLM_ALIGNMENT 4
 #define MLX5_UMR_FLEX_ALIGNMENT 0x40
 #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
 
 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
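
For completeness, a standalone usage sketch (a hypothetical helper, not from
the patch) mirroring the SHAMPO rounding in the en_rx.c hunks above: because
the new definition is an entry count and a power of two, the producer index
can be masked and the entry count rounded up directly in entry units.

/* Hypothetical helper mirroring the SHAMPO index/count rounding in the
 * en_rx.c hunks above: pad a KLM entry count up to a whole aligned group,
 * accounting for the entries already consumed in the current group by the
 * producer index 'pi'.
 */
#include <linux/align.h>
#include <linux/types.h>
#include <linux/mlx5/device.h>

static u16 shampo_round_klm_entries(u16 klm_entries, u16 pi)
{
	u16 skew = pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1);

	/* e.g. with 4-entry alignment: pi == 6, klm_entries == 5 gives
	 * skew == 2, and 2 + 5 rounds up to 8 entries posted starting at
	 * index ALIGN_DOWN(pi, 4) == 4.
	 */
	return ALIGN(klm_entries + skew, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
}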