net/mlx5e: Drop cqe_bcnt32 from mlx5e_skb_from_cqe_mpwrq_linear
Author: Maxim Mikityanskiy <maximmi@nvidia.com>
Date: Mon, 14 Feb 2022 18:57:56 +0000 (20:57 +0200)
Committer: Saeed Mahameed <saeedm@nvidia.com>
Commit date: Thu, 17 Mar 2022 18:51:56 +0000 (11:51 -0700)
The packet size in mlx5e_skb_from_cqe_mpwrq_linear can't overflow u16,
since the maximum packet size in linear striding RQ is 2^13 bytes. Drop
the unneeded u32 variable.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index 7c490c0..4b8699f 100644 (file)
@@ -1848,7 +1848,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 {
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u16 rx_headroom = rq->buff.headroom;
-       u32 cqe_bcnt32 = cqe_bcnt;
        struct bpf_prog *prog;
        struct sk_buff *skb;
        u32 metasize = 0;
@@ -1863,7 +1862,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
        va             = page_address(di->page) + head_offset;
        data           = va + rx_headroom;
-       frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+       frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 
        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
                                      frag_size, DMA_FROM_DEVICE);
@@ -1874,7 +1873,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                struct xdp_buff xdp;
 
                net_prefetchw(va); /* xdp_frame data area */
-               mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
+               mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
                if (mlx5e_xdp_handle(rq, di, prog, &xdp)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                                __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
@@ -1883,10 +1882,10 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
                rx_headroom = xdp.data - xdp.data_hard_start;
                metasize = xdp.data - xdp.data_meta;
-               cqe_bcnt32 = xdp.data_end - xdp.data;
+               cqe_bcnt = xdp.data_end - xdp.data;
        }
-       frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
-       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
+       frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
        if (unlikely(!skb))
                return NULL;