net/mlx5e: RX, Generalize mlx5e_fill_mxbuf()
Author:     Tariq Toukan <tariqt@nvidia.com>
AuthorDate: Mon, 17 Apr 2023 12:19:01 +0000 (15:19 +0300)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Wed, 19 Apr 2023 07:59:27 +0000 (08:59 +0100)
Make the function more generic: let it take an explicit frame_sz
parameter instead of deriving the frame size from the RQ struct.

No functional change here; this is preparation for a downstream patch.
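
As a simplified, self-contained sketch of the pattern (trimmed stand-in
types for illustration, not the in-tree mlx5 structures), the change
moves the choice of frame size from the helper to its callers:

    #include <stdint.h>

    /* Trimmed stand-ins for struct xdp_buff and struct mlx5e_rq. */
    struct xdp_buff_sketch {
            void *data;
            uint32_t frame_sz;   /* buffer size exposed to the XDP program */
    };

    struct rq_sketch {
            uint32_t frame0_sz;  /* default frame size stored on the RQ */
    };

    /* Before: the helper derives the frame size from the RQ itself. */
    static void fill_old(struct rq_sketch *rq, void *va,
                         struct xdp_buff_sketch *xdp)
    {
            xdp->data = va;
            xdp->frame_sz = rq->frame0_sz;  /* hard-wired to one source */
    }

    /* After: the caller passes the frame size; rq->frame0_sz becomes
     * just one possible argument, so other datapaths can supply their
     * own value.
     */
    static void fill_new(void *va, uint32_t frame_sz,
                         struct xdp_buff_sketch *xdp)
    {
            xdp->data = va;
            xdp->frame_sz = frame_sz;
    }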

Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index 1118327..a2c4b3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1630,10 +1630,10 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
 }
 
 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                            void *va, u16 headroom, u32 len,
+                            void *va, u16 headroom, u32 frame_sz, u32 len,
                             struct mlx5e_xdp_buff *mxbuf)
 {
-       xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+       xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
        xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
        mxbuf->cqe = cqe;
        mxbuf->rq = rq;
@@ -1666,7 +1666,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
                struct mlx5e_xdp_buff mxbuf;
 
                net_prefetchw(va); /* xdp_frame data area */
-               mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+               mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
+                                cqe_bcnt, &mxbuf);
                if (mlx5e_xdp_handle(rq, prog, &mxbuf))
                        return NULL; /* page/packet was consumed by XDP */
 
@@ -1714,7 +1715,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
        net_prefetchw(va); /* xdp_frame data area */
        net_prefetch(va + rx_headroom);
 
-       mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+       mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
+                        frag_consumed_bytes, &mxbuf);
        sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
        truesize = 0;
 
@@ -2042,7 +2044,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                struct mlx5e_xdp_buff mxbuf;
 
                net_prefetchw(va); /* xdp_frame data area */
-               mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+               mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
+                                cqe_bcnt, &mxbuf);
                if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                                __set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
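
All converted call sites pass rq->buff.frame0_sz, preserving behavior.
The payoff is for callers whose buffers are not sized by frame0_sz; a
hypothetical downstream caller (xsk_frame_sz is an illustrative name,
not from this commit) could now do:

    mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, xsk_frame_sz,
                     cqe_bcnt, &mxbuf);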