net/mlx5e: Support RX XDP metadata
author Toke Høiland-Jørgensen <toke@redhat.com>
Thu, 19 Jan 2023 22:15:35 +0000 (14:15 -0800)
committer Martin KaFai Lau <martin.lau@kernel.org>
Mon, 23 Jan 2023 17:58:23 +0000 (09:58 -0800)
Support the RX hash and timestamp metadata kfuncs. To do this, we need to
pass the cqe pointer into the mlx5e_skb_from* functions so that it can be
stashed in, and later retrieved from, the XDP ctx.
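
For reference, a minimal sketch of an XDP consumer for these hints, using
the bpf_xdp_metadata_rx_{timestamp,hash} kfuncs introduced earlier in this
series (the extern declarations are assumptions in the vmlinux/__ksym
style, and the program must be loaded device-bound so the kfuncs resolve
against the target netdev):

  #include <linux/types.h>
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                           __u64 *timestamp) __ksym;
  extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
                                      __u32 *hash) __ksym;

  SEC("xdp")
  int read_rx_meta(struct xdp_md *ctx)
  {
          __u64 ts = 0;
          __u32 hash = 0;

          /* both kfuncs return 0 on success, -EOPNOTSUPP otherwise */
          if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
                  bpf_printk("rx hw timestamp: %llu ns", ts);
          if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
                  bpf_printk("rx hash: 0x%x", hash);

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";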

Cc: Tariq Toukan <tariqt@nvidia.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20230119221536.3349901-17-sdf@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2d77fb8..711dc88 100644
@@ -626,10 +626,11 @@ struct mlx5e_rq;
 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
 typedef struct sk_buff *
 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                              u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+                              struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
+                              u32 head_offset, u32 page_idx);
 typedef struct sk_buff *
 (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
-                        u32 cqe_bcnt);
+                        struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 853f312..757c012 100644
@@ -73,6 +73,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
 
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
+{
+       return config->rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
 /* TX */
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
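
Note: mlx5e_rx_hw_stamp() only returns true once RX hardware timestamping
has been enabled on the device, so the timestamp kfunc added below reports
-EOPNOTSUPP by default. A userspace sketch of enabling it through the
standard SIOCSHWTSTAMP ioctl (hypothetical helper, error handling trimmed;
sock is any open socket):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <net/if.h>
  #include <linux/net_tstamp.h>
  #include <linux/sockios.h>

  static int enable_rx_tstamp(int sock, const char *ifname)
  {
          struct hwtstamp_config cfg = {
                  .tx_type   = HWTSTAMP_TX_OFF,
                  .rx_filter = HWTSTAMP_FILTER_ALL, /* the filter checked above */
          };
          struct ifreq ifr;

          memset(&ifr, 0, sizeof(ifr));
          strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
          ifr.ifr_data = (void *)&cfg;

          return ioctl(sock, SIOCSHWTSTAMP, &ifr);
  }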
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 31bb680..f7d52b1 100644
@@ -156,6 +156,34 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
        return true;
 }
 
+static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+       const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+       if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
+               return -EOPNOTSUPP;
+
+       *timestamp =  mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
+                                        _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
+       return 0;
+}
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+       const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+       if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
+               return -EOPNOTSUPP;
+
+       *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+       return 0;
+}
+
+const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
+       .xmo_rx_timestamp               = mlx5e_xdp_rx_timestamp,
+       .xmo_rx_hash                    = mlx5e_xdp_rx_hash,
+};
+
 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
                      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
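
The (void *)ctx casts above are valid because struct xdp_buff is the first
member of struct mlx5e_xdp_buff (see en/xdp.h below), so the xdp_md pointer
the core passes in is also a pointer to the driver's wrapper. A standalone
illustration of the pattern, with made-up type names:

  #include <assert.h>

  struct buff { void *data; };      /* stand-in for struct xdp_buff */

  struct wrapper {                  /* stand-in for struct mlx5e_xdp_buff */
          struct buff xdp;          /* must remain the first member */
          int cqe;                  /* driver-private per-packet state */
  };

  int main(void)
  {
          struct wrapper w = { .cqe = 42 };
          struct buff *ctx = &w.xdp;                     /* what the core sees */
          struct wrapper *back = (struct wrapper *)ctx;  /* what the driver recovers */

          assert((void *)back == (void *)&w); /* first member => same address */
          assert(back->cqe == 42);
          return 0;
  }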
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 389818b..69f338b 100644
@@ -46,6 +46,8 @@
 
 struct mlx5e_xdp_buff {
        struct xdp_buff xdp;
+       struct mlx5_cqe64 *cqe;
+       struct mlx5e_rq *rq;
 };
 
 struct mlx5e_xsk_param;
@@ -60,6 +62,8 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                   u32 flags);
 
+extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
                                                          struct mlx5e_xmit_data *xdptxd,
                                                          struct skb_shared_info *sinfo,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 08d4e5c..b7c84eb 100644
@@ -52,25 +52,30 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 
        if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
                for (i = 0; i < batch; i++) {
+                       struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
                        dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
                        umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
                                .ptag = cpu_to_be64(addr | MLX5_EN_WR),
                        };
+                       mxbuf->rq = rq;
                }
        } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
                for (i = 0; i < batch; i++) {
+                       struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
                        dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
                        umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
                                .key = rq->mkey_be,
                                .va = cpu_to_be64(addr),
                        };
+                       mxbuf->rq = rq;
                }
        } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
                u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
 
                for (i = 0; i < batch; i++) {
+                       struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
                        dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
                        umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
@@ -89,6 +94,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
                                .key = rq->mkey_be,
                                .va = cpu_to_be64(rq->wqe_overflow.addr),
                        };
+                       mxbuf->rq = rq;
                }
        } else {
                __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
@@ -96,6 +102,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
                __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
 
                for (i = 0; i < batch; i++) {
+                       struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
                        dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
                        umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
@@ -108,6 +115,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
                                .va = cpu_to_be64(rq->wqe_overflow.addr),
                                .bcount = pad_size,
                        };
+                       mxbuf->rq = rq;
                }
        }
 
@@ -238,6 +246,7 @@ static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_b
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    struct mlx5e_mpw_info *wi,
+                                                   struct mlx5_cqe64 *cqe,
                                                    u16 cqe_bcnt,
                                                    u32 head_offset,
                                                    u32 page_idx)
@@ -258,6 +267,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
         */
        WARN_ON_ONCE(head_offset);
 
+       /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+       mxbuf->cqe = cqe;
        xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
        xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
        net_prefetch(mxbuf->xdp.data);
@@ -292,6 +303,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                                              struct mlx5e_wqe_frag_info *wi,
+                                             struct mlx5_cqe64 *cqe,
                                              u32 cqe_bcnt)
 {
        struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
@@ -304,6 +316,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
         */
        WARN_ON_ONCE(wi->offset);
 
+       /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+       mxbuf->cqe = cqe;
        xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
        xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
        net_prefetch(mxbuf->xdp.data);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 087c943..cefc0ef 100644
@@ -13,11 +13,13 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
 int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
 struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    struct mlx5e_mpw_info *wi,
+                                                   struct mlx5_cqe64 *cqe,
                                                    u16 cqe_bcnt,
                                                    u32 head_offset,
                                                    u32 page_idx);
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                                              struct mlx5e_wqe_frag_info *wi,
+                                             struct mlx5_cqe64 *cqe,
                                              u32 cqe_bcnt);
 
 #endif /* __MLX5_EN_XSK_RX_H__ */
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cff5f2e..3370c8b 100644
@@ -5053,6 +5053,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        SET_NETDEV_DEV(netdev, mdev->device);
 
        netdev->netdev_ops = &mlx5e_netdev_ops;
+       netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
 
        mlx5e_dcbnl_build_netdev(netdev);
 
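
Registering the ops on the netdev is what lets the metadata kfuncs resolve
for this device: at BPF program load time the call is rewritten to invoke
the matching xmo_* callback directly. The net effect is roughly the
following (a conceptual sketch, not actual kernel code):

  /* conceptual only: the kernel unrolls this at program load time */
  static int xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
  {
          const struct xdp_buff *xdp = (const void *)ctx;
          const struct xdp_metadata_ops *ops = xdp->rxq->dev->xdp_metadata_ops;

          if (!ops || !ops->xmo_rx_hash)
                  return -EOPNOTSUPP;
          return ops->xmo_rx_hash(ctx, hash);
  }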
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index c6810ca..7b08653 100644
 
 static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+                               struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+                               u32 page_idx);
 static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+                                  struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+                                  u32 page_idx);
 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -76,11 +78,6 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
        .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
 };
 
-static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
-{
-       return config->rx_filter == HWTSTAMP_FILTER_ALL;
-}
-
 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
                                       u32 cqcc, void *data)
 {
@@ -1575,16 +1572,19 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
        return skb;
 }
 
-static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, void *va, u16 headroom,
-                            u32 len, struct mlx5e_xdp_buff *mxbuf)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+                            void *va, u16 headroom, u32 len,
+                            struct mlx5e_xdp_buff *mxbuf)
 {
        xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
        xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
+       mxbuf->cqe = cqe;
+       mxbuf->rq = rq;
 }
 
 static struct sk_buff *
 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
-                         u32 cqe_bcnt)
+                         struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
 {
        union mlx5e_alloc_unit *au = wi->au;
        u16 rx_headroom = rq->buff.headroom;
@@ -1609,7 +1609,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
                struct mlx5e_xdp_buff mxbuf;
 
                net_prefetchw(va); /* xdp_frame data area */
-               mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+               mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
                if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
                        return NULL; /* page/packet was consumed by XDP */
 
@@ -1630,7 +1630,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 
 static struct sk_buff *
 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
-                            u32 cqe_bcnt)
+                            struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
 {
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
        struct mlx5e_wqe_frag_info *head_wi = wi;
@@ -1654,7 +1654,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
        net_prefetchw(va); /* xdp_frame data area */
        net_prefetch(va + rx_headroom);
 
-       mlx5e_fill_mxbuf(rq, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+       mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, frag_consumed_bytes, &mxbuf);
        sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
        truesize = 0;
 
@@ -1777,7 +1777,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                              mlx5e_skb_from_cqe_linear,
                              mlx5e_skb_from_cqe_nonlinear,
                              mlx5e_xsk_skb_from_cqe_linear,
-                             rq, wi, cqe_bcnt);
+                             rq, wi, cqe, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1830,7 +1830,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
                              mlx5e_skb_from_cqe_linear,
                              mlx5e_skb_from_cqe_nonlinear,
-                             rq, wi, cqe_bcnt);
+                             rq, wi, cqe, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1889,7 +1889,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
        skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
                              mlx5e_skb_from_cqe_mpwrq_linear,
                              mlx5e_skb_from_cqe_mpwrq_nonlinear,
-                             rq, wi, cqe_bcnt, head_offset, page_idx);
+                             rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
        if (!skb)
                goto mpwrq_cqe_out;
 
@@ -1940,7 +1940,8 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
 
 static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+                                  struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+                                  u32 page_idx)
 {
        union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
@@ -1979,7 +1980,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 
 static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                               u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+                               struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+                               u32 page_idx)
 {
        union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
        u16 rx_headroom = rq->buff.headroom;
@@ -2010,7 +2012,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                struct mlx5e_xdp_buff mxbuf;
 
                net_prefetchw(va); /* xdp_frame data area */
-               mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+               mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
                if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                                __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
@@ -2174,8 +2176,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
                if (likely(head_size))
                        *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
                else
-                       *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
-                                                                 page_idx);
+                       *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
+                                                                 data_offset, page_idx);
                if (unlikely(!*skb))
                        goto free_hd_entry;
 
@@ -2249,7 +2251,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
                              mlx5e_skb_from_cqe_mpwrq_linear,
                              mlx5e_skb_from_cqe_mpwrq_nonlinear,
                              mlx5e_xsk_skb_from_cqe_mpwrq_linear,
-                             rq, wi, cqe_bcnt, head_offset, page_idx);
+                             rq, wi, cqe, cqe_bcnt, head_offset,
+                             page_idx);
        if (!skb)
                goto mpwrq_cqe_out;
 
@@ -2494,7 +2497,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
                              mlx5e_skb_from_cqe_linear,
                              mlx5e_skb_from_cqe_nonlinear,
-                             rq, wi, cqe_bcnt);
+                             rq, wi, cqe, cqe_bcnt);
        if (!skb)
                goto wq_free_wqe;
 
@@ -2586,7 +2589,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
                goto free_wqe;
        }
 
-       skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
+       skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
        if (!skb)
                goto free_wqe;
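
As a usage note: the xdp_hw_metadata utility added under
tools/testing/selftests/bpf/ in the same series attaches a program that
reads both hints and prints them per packet; running it against an mlx5e
interface (with RX timestamping enabled) exercises both callbacks added
here.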