net/mlx5e: Remove redundant page argument in mlx5e_xdp_handle()
author Tariq Toukan <tariqt@nvidia.com>
Thu, 16 Feb 2023 00:09:12 +0000 (16:09 -0800)
committer Saeed Mahameed <saeedm@nvidia.com>
Sat, 18 Feb 2023 09:01:33 +0000 (01:01 -0800)
Remove the page parameter; it can be derived from the xdp_buff member
of mlx5e_xdp_buff.
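
The page backing a non-XSK RX buffer comes from the driver's page
allocator, and xdp.data points into that page's linear kernel mapping,
so virt_to_page() recovers it. A minimal sketch of the derivation
(mxbuf_page() is a hypothetical helper for illustration, not part of
this patch):

	/* Illustration only: recover the backing page from the xdp_buff.
	 * Valid because non-XSK RX buffers are page-backed and xdp.data
	 * points inside the page's linear mapping.
	 */
	static inline struct page *mxbuf_page(struct mlx5e_xdp_buff *mxbuf)
	{
		return virt_to_page(mxbuf->xdp.data);
	}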

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4b9cd8e..bcd6370 100644
@@ -186,7 +186,7 @@ const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
 };
 
 /* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
                      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
 {
        struct xdp_buff *xdp = &mxbuf->xdp;
@@ -210,7 +210,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
                __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
                if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-                       mlx5e_page_dma_unmap(rq, page);
+                       mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
                rq->stats->xdp_redirect++;
                return true;
        default:
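
For reference, a sketch of the logic behind the guarded unmap above
(mlx5e_xdp_unmap_on_redirect() is a hypothetical helper, not part of
this patch): XSK buffer pool memory is DMA-managed by the pool itself,
so only driver-mapped page buffers are unmapped, and only for those is
virt_to_page() on xdp->data meaningful.

	static void mlx5e_xdp_unmap_on_redirect(struct mlx5e_rq *rq,
						struct xdp_buff *xdp)
	{
		/* XSK pool memory is unmapped by the pool, not the driver */
		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
			mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
	}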
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 69f338b..10bcfa6 100644
@@ -52,7 +52,7 @@ struct mlx5e_xdp_buff {
 
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
                      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index b7c84eb..fab7876 100644
@@ -289,7 +289,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
         */
 
        prog = rcu_dereference(rq->xdp_prog);
-       if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
+       if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
                if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                return NULL; /* page/packet was consumed by XDP */
@@ -323,7 +323,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
        net_prefetch(mxbuf->xdp.data);
 
        prog = rcu_dereference(rq->xdp_prog);
-       if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
+       if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
                return NULL; /* page/packet was consumed by XDP */
 
        /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0af02cc..8e64f4b 100644
@@ -1610,7 +1610,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 
                net_prefetchw(va); /* xdp_frame data area */
                mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
-               if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
+               if (mlx5e_xdp_handle(rq, prog, &mxbuf))
                        return NULL; /* page/packet was consumed by XDP */
 
                rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
@@ -1698,10 +1698,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
                wi++;
        }
 
-       au = head_wi->au;
-
        prog = rcu_dereference(rq->xdp_prog);
-       if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
+       if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        int i;
 
@@ -1718,7 +1716,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
        if (unlikely(!skb))
                return NULL;
 
-       page_ref_inc(au->page);
+       page_ref_inc(head_wi->au->page);
 
        if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
                int i;
@@ -2013,7 +2011,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
                net_prefetchw(va); /* xdp_frame data area */
                mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
-               if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
+               if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                                __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                        return NULL; /* page/packet was consumed by XDP */