net/mlx5e: RX, Fix page_pool page fragment tracking for XDP
author		Dragos Tatulea <dtatulea@nvidia.com>
		Wed, 31 May 2023 18:18:49 +0000 (21:18 +0300)
committer	Saeed Mahameed <saeedm@nvidia.com>
		Wed, 5 Jul 2023 17:57:04 +0000 (10:57 -0700)
Currently mlx5e releases pages directly to the page_pool for XDP_TX and
does page fragment counting for XDP_REDIRECT. RX pages from the
page_pool are leaking on XDP_REDIRECT because the xdp core releases only
one fragment out of MLX5E_PAGECNT_BIAS_MAX and the page is subsequently
marked as "skip release", which prevents the driver from releasing the
remaining fragments.
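
For context, the driver pre-fragments every page_pool page with a large
bias and counts the fragments it hands out in frag_page->frags. A
simplified sketch of the alloc side (helper name and layout taken from
the driver sources around this series; details may differ):

    static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
                                           struct mlx5e_frag_page *frag_page)
    {
            struct page *page;

            page = page_pool_dev_alloc_pages(rq->page_pool);
            if (unlikely(!page))
                    return -ENOMEM;

            /* Pre-fragment with a large bias; frags counts how many
             * fragments are actually handed out.
             */
            page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);

            *frag_page = (struct mlx5e_frag_page) {
                    .page  = page,
                    .frags = 0,
            };

            return 0;
    }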

A fix would be to take an extra fragment for XDP_REDIRECT and leave the
"skip release" bit unset so that the driver-side release can drain the
remaining bias fragments. But that would be a shortsighted solution.
Instead, this patch converges the two XDP paths (XDP_TX and
XDP_REDIRECT) so that both always do fragment tracking. The "skip
release" bit is no longer necessary for XDP.
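
With converged fragment tracking, the driver-side release simply drains
the bias that is not owed to outstanding users. Roughly (a sketch of
mlx5e_page_release_fragmented(), which the diff below calls; the exact
helper lives in the driver headers):

    static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
                                              struct mlx5e_frag_page *frag_page)
    {
            u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
            struct page *page = frag_page->page;

            /* Drop the unused bias. If in-flight XDP frames still hold
             * fragments, their completions free the page instead.
             */
            if (page_pool_defrag_page(page, drain_count) == 0)
                    page_pool_put_defragged_page(rq->page_pool, page, -1, true);
    }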

Fixes: 6f5742846053 ("net/mlx5e: RX, Enable skb page recycling through the page_pool")
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f0e6095..40589ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -662,8 +662,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                                /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
                                 * as we know this is a page_pool page.
                                 */
-                               page_pool_put_defragged_page(page->pp,
-                                                            page, -1, true);
+                               page_pool_recycle_direct(page->pp, page);
                        } while (++n < num);
 
                        break;
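
page_pool_recycle_direct() goes through the regular fragment-aware put
path, so each completed XDP frame now drops exactly one fragment instead
of unconditionally returning the page. At the time of this series it
expanded roughly to (from the page_pool headers; shown for context):

    static inline void page_pool_recycle_direct(struct page_pool *pool,
                                                struct page *page)
    {
            /* -1: sync the full page for DMA; true: allow direct
             * (in-softirq) recycling. The page is freed/recycled only
             * once its last fragment is dropped.
             */
            page_pool_put_full_page(pool, page, true);
    }
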
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a957521..41d3715 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1751,11 +1751,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
        prog = rcu_dereference(rq->xdp_prog);
        if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
-               if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        struct mlx5e_wqe_frag_info *pwi;
 
                        for (pwi = head_wi; pwi < wi; pwi++)
-                               pwi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+                               pwi->frag_page->frags++;
                }
                return NULL; /* page/packet was consumed by XDP */
        }
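
The per-fragment loop above keeps the accounting balanced: one frags++
per WQE fragment consumed by XDP, matched by one single-fragment put at
XDP completion. A hypothetical walk-through for a page with three
fragments taken by XDP:

    /* alloc:  pp_frag_count = MLX5E_PAGECNT_BIAS_MAX, frags = 0
     * XDP consumes 3 fragments:                       frags = 3
     * driver release: defrag by (BIAS_MAX - 3)  -> pp_frag_count = 3
     * 3 XDP completions, one fragment each      -> pp_frag_count = 0
     *                                            -> page is recycled
     */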
@@ -1825,12 +1825,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                              rq, wi, cqe, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
-               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-                       /* do not return page to cache,
-                        * it will be returned on XDP_TX completion.
-                        */
-                       wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-               }
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                       wi->frag_page->frags++;
                goto wq_cyc_pop;
        }
 
@@ -1876,12 +1872,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                              rq, wi, cqe, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
-               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-                       /* do not return page to cache,
-                        * it will be returned on XDP_TX completion.
-                        */
-                       wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-               }
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                       wi->frag_page->frags++;
                goto wq_cyc_pop;
        }
 
@@ -2060,12 +2052,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
        if (prog) {
                if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-                               int i;
+                               struct mlx5e_frag_page *pfp;
+
+                               for (pfp = head_page; pfp < frag_page; pfp++)
+                                       pfp->frags++;
 
-                               for (i = 0; i < sinfo->nr_frags; i++)
-                                       /* non-atomic */
-                                       __set_bit(page_idx + i, wi->skip_release_bitmap);
-                               return NULL;
+                               wi->linear_page.frags++;
                        }
                        mlx5e_page_release_fragmented(rq, &wi->linear_page);
                        return NULL; /* page/packet was consumed by XDP */
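
In the striding-RQ nonlinear path, the XDP frame is built around
wi->linear_page, so taking a fragment on it right before the
unconditional mlx5e_page_release_fragmented() keeps the page alive until
the in-flight frame completes. The net effect (sketch):

    /* wi->linear_page.frags++           -> one fragment owed to the
     *                                      in-flight XDP frame
     * mlx5e_page_release_fragmented()   -> drains only the unused bias,
     *                                      pp_frag_count stays at 1
     * XDP completion (one-fragment put) -> pp_frag_count hits 0,
     *                                      page is recycled
     */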
@@ -2163,7 +2155,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                 cqe_bcnt, &mxbuf);
                if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-                               __set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
+                               frag_page->frags++;
                        return NULL; /* page/packet was consumed by XDP */
                }