net/mlx5e: Drop error CQE handling from the XSK RX handler
author:    Maxim Mikityanskiy <maximmi@nvidia.com>
           Thu, 20 Jan 2022 09:32:04 +0000 (11:32 +0200)
committer: Saeed Mahameed <saeedm@nvidia.com>
           Tue, 3 May 2022 04:21:12 +0000 (21:21 -0700)
The error CQE (opcode) check in mlx5e_xsk_skb_from_cqe_linear is redundant: the
RX handlers already handle error CQEs before invoking the skb_from_cqe callbacks.
Remove that check and drop the now-unused cqe parameter from the skb_from_cqe
handlers.
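
For context, a simplified sketch of the caller-side path that makes the
in-handler check redundant. This is paraphrased from the legacy-RQ handler in
en_rx.c and is not part of this patch; names such as MLX5E_RX_ERR_CQE and
trigger_report are recalled from that file rather than taken from the hunks
below, so details may differ:

  static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
  {
          ...
          /* Error CQEs are counted and dropped here, before any
           * skb_from_cqe handler runs.
           */
          if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
                  trigger_report(rq, cqe);
                  rq->stats->wqe_err++;
                  goto free_wqe;
          }

          /* Only non-error CQEs reach the (XSK or regular) handlers,
           * so the handlers no longer need the cqe argument.
           */
          skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
                                mlx5e_skb_from_cqe_linear,
                                mlx5e_skb_from_cqe_nonlinear,
                                rq, wi, cqe_bcnt);
          ...
  }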

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5081808..b90902d 100644
@@ -648,8 +648,8 @@ typedef struct sk_buff *
 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 typedef struct sk_buff *
-(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                        struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
+(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+                        u32 cqe_bcnt);
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 021da08..9a15535 100644
@@ -80,7 +80,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 }
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
-                                             struct mlx5_cqe64 *cqe,
                                              struct mlx5e_wqe_frag_info *wi,
                                              u32 cqe_bcnt)
 {
@@ -99,11 +98,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
        xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        net_prefetch(xdp->data);
 
-       if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
-               rq->stats->wqe_err++;
-               return NULL;
-       }
-
        prog = rcu_dereference(rq->xdp_prog);
        if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
                return NULL; /* page/packet was consumed by XDP */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 7f88ccf..a8cfab4 100644
@@ -15,7 +15,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    u32 head_offset,
                                                    u32 page_idx);
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
-                                             struct mlx5_cqe64 *cqe,
                                              struct mlx5e_wqe_frag_info *wi,
                                              u32 cqe_bcnt);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a5f6fd1..2dea9e4 100644
@@ -1521,8 +1521,8 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
 }
 
 static struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+                         u32 cqe_bcnt)
 {
        struct mlx5e_dma_info *di = wi->di;
        u16 rx_headroom = rq->buff.headroom;
@@ -1565,8 +1565,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 }
 
 static struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                            struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+                            u32 cqe_bcnt)
 {
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
        struct mlx5e_wqe_frag_info *head_wi = wi;
@@ -1709,7 +1709,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
                              mlx5e_skb_from_cqe_linear,
                              mlx5e_skb_from_cqe_nonlinear,
-                             rq, cqe, wi, cqe_bcnt);
+                             rq, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1762,7 +1762,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
                              mlx5e_skb_from_cqe_linear,
                              mlx5e_skb_from_cqe_nonlinear,
-                             rq, cqe, wi, cqe_bcnt);
+                             rq, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -2361,7 +2361,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
                              mlx5e_skb_from_cqe_linear,
                              mlx5e_skb_from_cqe_nonlinear,
-                             rq, cqe, wi, cqe_bcnt);
+                             rq, wi, cqe_bcnt);
        if (!skb)
                goto wq_free_wqe;
 
@@ -2453,7 +2453,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
                goto free_wqe;
        }
 
-       skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+       skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
        if (!skb)
                goto free_wqe;