drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/bitmap.h>
#include <linux/filter.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
#include "en/devlink.h"

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
                                u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
                                   u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
        .handle_rx_cqe       = mlx5e_handle_rx_cqe,
        .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};

static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
                                       u32 cqcc, void *data)
{
        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);

        memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}

static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
                                           struct mlx5_cqe64 *cqe)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_cqe64 *title = &cqd->title;

        memcpy(title, cqe, sizeof(struct mlx5_cqe64));

        if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
                return;

        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
                        mpwrq_get_cqe_consumed_strides(title);
        else
                cqd->wqe_counter =
                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
                                         struct mlx5_cqwq *wq,
                                         u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_cqe64 *title = &cqd->title;

        mlx5e_read_cqe_slot(wq, cqcc, title);
        cqd->left        = be32_to_cpu(title->byte_cnt);
        cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
        rq->stats->cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
                                            struct mlx5e_cq_decomp *cqd,
                                            u32 cqcc)
{
        mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
        cqd->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
        u32 cqcc   = wq->cc;
        u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
        u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
        u32 wq_sz  = mlx5_cqwq_get_size(wq);
        u32 ci_top = min_t(u32, wq_sz, ci + n);

        for (; ci < ci_top; ci++, n--) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

                cqe->op_own = op_own;
        }

        if (unlikely(ci == wq_sz)) {
                op_own = !op_own;
                for (ci = 0; ci < n; ci++) {
                        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

                        cqe->op_own = op_own;
                }
        }
}
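
/* Note on mlx5e_cqes_update_owner() above: the CQE validity (ownership) bit
 * alternates on every wrap of the CQ, which is what the
 * mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1 computation captures. Slots
 * consumed while expanding a compressed session are rewritten with the
 * current wrap's op_own value; if the range crosses the end of the queue,
 * the remaining n slots continue from index 0 with the toggled bit.
 */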

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
                                        struct mlx5_cqwq *wq,
                                        u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
        struct mlx5_cqe64 *title = &cqd->title;

        title->byte_cnt     = mini_cqe->byte_cnt;
        title->check_sum    = mini_cqe->checksum;
        title->op_own      &= 0xf0;
        title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);

        /* state bit set implies linked-list striding RQ wq type and
         * HW stride index capability supported
         */
        if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
                title->wqe_counter = mini_cqe->stridx;
                return;
        }

        /* HW stride index capability not supported */
        title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
        else
                cqd->wqe_counter =
                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
                                                struct mlx5_cqwq *wq,
                                                u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;

        mlx5e_decompress_cqe(rq, wq, cqcc);
        cqd->title.rss_hash_type   = 0;
        cqd->title.rss_hash_result = 0;
}

static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqwq *wq,
                                         struct mlx5_cqe64 *cqe,
                                         int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cqcc, left;
        u32 i;

        left = get_cqe_enhanced_num_mini_cqes(cqe);
        /* Avoid breaking the cqe compression session in the middle when the
         * budget is not sufficient to handle all of it. In that case, return
         * work_done == budget_rem to give a 'busy' napi indication.
         */
        if (unlikely(left > budget_rem))
                return budget_rem;

        cqcc = wq->cc;
        cqd->mini_arr_idx = 0;
        memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
        for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
                INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
                                mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
                                rq, &cqd->title);
        }
        wq->cc = cqcc;
        rq->stats->cqe_compress_pkts += left;

        return left;
}
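
/* Illustrative flow for enhanced CQE compression (numbers are hypothetical):
 * a title CQE announcing, say, 8 mini CQEs is first cached via
 * mlx5e_read_enhanced_title_slot(). If at least 8 units of budget remain,
 * mlx5e_decompress_enhanced_cqe() expands each mini CQE against the cached
 * title (byte count, checksum, wqe counter) and hands the result to the
 * regular rx handler; otherwise it returns budget_rem so NAPI reports busy
 * and the whole session is retried on the next poll.
 */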

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
                                             struct mlx5_cqwq *wq,
                                             int update_owner_only,
                                             int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cqcc = wq->cc + update_owner_only;
        u32 cqe_count;
        u32 i;

        cqe_count = min_t(u32, cqd->left, budget_rem);

        for (i = update_owner_only; i < cqe_count;
             i++, cqd->mini_arr_idx++, cqcc++) {
                if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
                        mlx5e_read_mini_arr_slot(wq, cqd, cqcc);

                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
                INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
                                mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
                                rq, &cqd->title);
        }
        mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
        wq->cc = cqcc;
        cqd->left -= cqe_count;
        rq->stats->cqe_compress_pkts += cqe_count;

        return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
                                              struct mlx5_cqwq *wq,
                                              int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cc = wq->cc;

        mlx5e_read_title_slot(rq, wq, cc);
        mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
        mlx5e_decompress_cqe(rq, wq, cc);
        INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
                        mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
                        rq, &cqd->title);
        cqd->mini_arr_idx++;

        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
}
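
/* For legacy (non-enhanced) compression the flow above is: the title CQE is
 * read from slot cc and the first mini CQE array from slot cc + 1; the first
 * expanded CQE is handled inline, and mlx5e_decompress_cqes_cont() then walks
 * the remaining mini CQEs (refilling the 8-entry mini array as needed),
 * rewrites the consumed slots' ownership bits and advances wq->cc.
 */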

#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)

static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
                                       struct mlx5e_frag_page *frag_page)
{
        struct page *page;

        page = page_pool_dev_alloc_pages(rq->page_pool);
        if (unlikely(!page))
                return -ENOMEM;

        page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);

        *frag_page = (struct mlx5e_frag_page) {
                .page   = page,
                .frags  = 0,
        };

        return 0;
}

static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
                                          struct mlx5e_frag_page *frag_page)
{
        u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
        struct page *page = frag_page->page;

        if (page_pool_defrag_page(page, drain_count) == 0)
                page_pool_put_defragged_page(rq->page_pool, page, -1, true);
}
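
/* The two helpers above implement a page-fragment reference bias:
 * allocation pre-charges the page with MLX5E_PAGECNT_BIAS_MAX pool fragment
 * references, and frag_page->frags only counts how many were actually handed
 * out. Release then drains the unused remainder in a single call, so no
 * atomic refcount operation is needed per fragment. For example (with
 * PAGE_SIZE == 4096, so the bias is 64), a page that served 3 fragments is
 * released with drain_count == 61; the pool reclaims the page once the 3
 * outstanding fragment references are also dropped.
 */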

static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
                                    struct mlx5e_wqe_frag_info *frag)
{
        int err = 0;

        if (!frag->offset)
                /* On first frag (offset == 0), replenish page.
                 * Other frags that point to the same page (with a different
                 * offset) should just use the new one without replenishing again
                 * by themselves.
                 */
                err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);

        return err;
}

static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
{
#define CAN_RELEASE_MASK \
        (BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))

#define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)

        return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *frag)
{
        if (mlx5e_frag_can_release(frag))
                mlx5e_page_release_fragmented(rq, frag->frag_page);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
        return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
                              u16 ix)
{
        struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
        int err;
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
                dma_addr_t addr;
                u16 headroom;

                err = mlx5e_get_rx_frag(rq, frag);
                if (unlikely(err))
                        goto free_frags;

                frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);

                headroom = i == 0 ? rq->buff.headroom : 0;
                addr = page_pool_get_dma_addr(frag->frag_page->page);
                wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
        }

        return 0;

free_frags:
        while (--i >= 0)
                mlx5e_put_rx_frag(rq, --frag);

        return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *wi)
{
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
                mlx5e_put_rx_frag(rq, wi);
}

static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
{
        if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
                xsk_buff_free(*wi->xskp);
}

static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

        if (rq->xsk_pool) {
                mlx5e_xsk_free_rx_wqe(wi);
        } else {
                mlx5e_free_rx_wqe(rq, wi);

                /* Avoid a second release of the wqe pages: dealloc is called
                 * for the same missing wqes on regular RQ flush and on regular
                 * RQ close. This happens when XSK RQs come into play.
                 */
                for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
                        wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
        }
}

static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        int i;

        for (i = 0; i < wqe_bulk; i++) {
                int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
                struct mlx5e_wqe_frag_info *wi;

                wi = get_frag(rq, j);
                /* The page is always put into the Reuse Ring, because there
                 * is no way to return the page to the userspace when the
                 * interface goes down.
                 */
                mlx5e_xsk_free_rx_wqe(wi);
        }
}

static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        int i;

        for (i = 0; i < wqe_bulk; i++) {
                int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
                struct mlx5e_wqe_frag_info *wi;

                wi = get_frag(rq, j);
                mlx5e_free_rx_wqe(rq, wi);
        }
}

static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        int i;

        for (i = 0; i < wqe_bulk; i++) {
                int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
                struct mlx5e_rx_wqe_cyc *wqe;

                wqe = mlx5_wq_cyc_get_wqe(wq, j);

                if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
                        break;
        }

        return i;
}

static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
        int remaining = wqe_bulk;
        int i = 0;

        /* The WQE bulk is split into smaller bulks that are sized
         * according to the page pool cache refill size to avoid overflowing
         * the page pool cache due to too many page releases at once.
         */
        do {
                int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
                int alloc_count;

                mlx5e_free_rx_wqes(rq, ix + i, refill);
                alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
                i += alloc_count;
                if (unlikely(alloc_count != refill))
                        break;

                remaining -= refill;
        } while (remaining);

        return i;
}
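
/* Example of the split above (illustrative numbers): with
 * rq->wqe.info.refill_unit == 64 and wqe_bulk == 160, the loop runs three
 * passes of 64, 64 and 32 WQEs, freeing and re-allocating each sub-bulk
 * before moving on. A short allocation stops the loop early and the caller
 * sees count != wqe_bulk, which bumps buff_alloc_err and keeps NAPI busy.
 */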

static void
mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
                               struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
                               u32 frag_offset, u32 len)
{
        skb_frag_t *frag;

        dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);

        dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
        if (!xdp_buff_has_frags(xdp)) {
                /* Init on the first fragment to avoid cold cache access
                 * when possible.
                 */
                sinfo->nr_frags = 0;
                sinfo->xdp_frags_size = 0;
                xdp_buff_set_frags_flag(xdp);
        }

        frag = &sinfo->frags[sinfo->nr_frags++];
        skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);

        if (page_is_pfmemalloc(frag_page->page))
                xdp_buff_set_frag_pfmemalloc(xdp);
        sinfo->xdp_frags_size += len;
}

static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
                   struct page *page, u32 frag_offset, u32 len,
                   unsigned int truesize)
{
        dma_addr_t addr = page_pool_get_dma_addr(page);

        dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
                                rq->buff.map_dir);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        page, frag_offset, len, truesize);
}

static inline void
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
                      struct page *page, dma_addr_t addr,
                      int offset_from, int dma_offset, u32 headlen)
{
        const void *from = page_address(page) + offset_from;
        /* Aligning len to sizeof(long) optimizes memcpy performance */
        unsigned int len = ALIGN(headlen, sizeof(long));

        dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
                                rq->buff.map_dir);
        skb_copy_to_linear_data(skb, from, len);
}

static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
{
        bool no_xdp_xmit;
        int i;

        /* A common case for AF_XDP. */
        if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
                return;

        no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);

        if (rq->xsk_pool) {
                struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;

                /* The page is always put into the Reuse Ring, because there
                 * is no way to return the page to userspace when the interface
                 * goes down.
                 */
                for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
                        if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
                                xsk_buff_free(xsk_buffs[i]);
        } else {
                for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
                        if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
                                struct mlx5e_frag_page *frag_page;

                                frag_page = &wi->alloc_units.frag_pages[i];
                                mlx5e_page_release_fragmented(rq, frag_page);
                        }
                }
        }
}

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
{
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

        do {
                u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);

                mlx5_wq_ll_push(wq, next_wqe_index);
        } while (--n);

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);
}

/* Return the size of the contiguous free (zero-bit) window inside the bitmap,
 * starting at @first and wrapping around the end of the bitmap if needed,
 * capped at @len.
 */
static int bitmap_find_window(unsigned long *bitmap, int len,
                              int bitmap_size, int first)
{
        int next_one, count;

        next_one = find_next_bit(bitmap, bitmap_size, first);
        if (next_one == bitmap_size) {
                if (bitmap_size - first >= len)
                        return len;
                next_one = find_next_bit(bitmap, bitmap_size, 0);
                count = next_one + bitmap_size - first;
        } else {
                count = next_one - first;
        }

        return min(len, count);
}
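
/* Worked example for bitmap_find_window() (illustrative): with
 * bitmap_size == 16, first == 14, len == 4 and only bit 1 set, no set bit is
 * found from position 14, and 16 - 14 == 2 < len, so the search wraps:
 * next_one == 1 and count == 1 + 16 - 14 == 3, i.e. the free window is
 * positions {14, 15, 0} and min(len, count) == 3 is returned.
 */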

static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
                          __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
{
        memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
        umr_wqe->ctrl.opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                             MLX5_OPCODE_UMR);
        umr_wqe->ctrl.umr_mkey = key;
        umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
                                            | MLX5E_KLM_UMR_DS_CNT(klm_len));
        umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
        umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
        umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
        umr_wqe->uctrl.mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
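
/* build_klm_umr() fills only the control pieces of a KLM UMR WQE: a UMR
 * opcode tagged with the producer counter, the target mkey, a DS count that
 * accounts for the inline KLM list, and an xlt offset/octowords pair telling
 * the device which slice of the key's translation table the inline entries
 * will overwrite. The KLM entries themselves are filled in by the caller.
 */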

static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
                                     struct mlx5e_icosq *sq,
                                     u16 klm_entries, u16 index)
{
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
        u16 entries, pi, header_offset, wqe_bbs, new_entries;
        u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
        u16 page_index = shampo->curr_page_index;
        struct mlx5e_frag_page *frag_page;
        u64 addr = shampo->last_addr;
        struct mlx5e_dma_info *dma_info;
        struct mlx5e_umr_wqe *umr_wqe;
        int headroom, err, i;

        headroom = rq->buff.headroom;
        new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
        entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
        wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
        pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
        umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
        build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);

        frag_page = &shampo->pages[page_index];

        for (i = 0; i < entries; i++, index++) {
                dma_info = &shampo->info[index];
                if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
                                         MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
                        goto update_klm;
                header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
                        MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
                if (!(header_offset & (PAGE_SIZE - 1))) {
                        page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
                        frag_page = &shampo->pages[page_index];

                        err = mlx5e_page_alloc_fragmented(rq, frag_page);
                        if (unlikely(err))
                                goto err_unmap;

                        addr = page_pool_get_dma_addr(frag_page->page);

                        dma_info->addr = addr;
                        dma_info->frag_page = frag_page;
                } else {
                        dma_info->addr = addr + header_offset;
                        dma_info->frag_page = frag_page;
                }

update_klm:
                umr_wqe->inline_klms[i].bcount =
                        cpu_to_be32(MLX5E_RX_MAX_HEAD);
                umr_wqe->inline_klms[i].key    = cpu_to_be32(lkey);
                umr_wqe->inline_klms[i].va     =
                        cpu_to_be64(dma_info->addr + headroom);
        }

        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
                .wqe_type       = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
                .num_wqebbs     = wqe_bbs,
                .shampo.len     = new_entries,
        };

        shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
        shampo->curr_page_index = page_index;
        shampo->last_addr = addr;
        sq->pc += wqe_bbs;
        sq->doorbell_cseg = &umr_wqe->ctrl;

        return 0;

err_unmap:
        while (--i >= 0) {
                dma_info = &shampo->info[--index];
                if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
                        dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
                        mlx5e_page_release_fragmented(rq, dma_info->frag_page);
                }
        }
        rq->stats->buff_alloc_err++;
        return err;
}

static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
{
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
        u16 klm_entries, num_wqe, index, entries_before;
        struct mlx5e_icosq *sq = rq->icosq;
        int i, err, max_klm_entries, len;

        max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
        klm_entries = bitmap_find_window(shampo->bitmap,
                                         shampo->hd_per_wqe,
                                         shampo->hd_per_wq, shampo->pi);
        if (!klm_entries)
                return 0;

        klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
        index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
        entries_before = shampo->hd_per_wq - index;

        if (unlikely(entries_before < klm_entries))
                num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
                          DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
        else
                num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);

        for (i = 0; i < num_wqe; i++) {
                len = (klm_entries > max_klm_entries) ? max_klm_entries :
                                                        klm_entries;
                if (unlikely(index + len > shampo->hd_per_wq))
                        len = shampo->hd_per_wq - index;
                err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
                if (unlikely(err))
                        return err;
                index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
                klm_entries -= len;
        }

        return 0;
}
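
/* The sizing above first widens klm_entries so the UMR starts on a
 * MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT boundary, then splits the work into
 * num_wqe UMR WQEs: one split at the end of the circular header buffer
 * (entries_before) and further splits whenever a single WQE would exceed
 * max_klm_entries. Each mlx5e_build_shampo_hd_umr() call therefore covers a
 * contiguous, aligned slice of shampo->info.
 */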

static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
        struct mlx5e_icosq *sq = rq->icosq;
        struct mlx5e_frag_page *frag_page;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
        u32 offset; /* 17-bit value with MTT. */
        u16 pi;
        int err;
        int i;

        if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
                err = mlx5e_alloc_rx_hd_mpwqe(rq);
                if (unlikely(err))
                        goto err;
        }

        pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
        umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));

        frag_page = &wi->alloc_units.frag_pages[0];

        for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
                dma_addr_t addr;

                err = mlx5e_page_alloc_fragmented(rq, frag_page);
                if (unlikely(err))
                        goto err_unmap;
                addr = page_pool_get_dma_addr(frag_page->page);
                umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
                        .ptag = cpu_to_be64(addr | MLX5_EN_WR),
                };
        }

        /* Pad if needed, in case the value set to ucseg->xlt_octowords
         * in mlx5e_build_umr_wqe() needed alignment.
         */
        if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
                int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
                        rq->mpwqe.pages_per_wqe;

                memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
                       sizeof(*umr_wqe->inline_mtts) * pad);
        }

        bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
        wi->consumed_strides = 0;

        umr_wqe->ctrl.opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                            MLX5_OPCODE_UMR);

        offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
        umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);

        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
                .wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
                .num_wqebbs = rq->mpwqe.umr_wqebbs,
                .umr.rq     = rq,
        };

        sq->pc += rq->mpwqe.umr_wqebbs;

        sq->doorbell_cseg = &umr_wqe->ctrl;

        return 0;

err_unmap:
        while (--i >= 0) {
                frag_page--;
                mlx5e_page_release_fragmented(rq, frag_page);
        }

err:
        rq->stats->buff_alloc_err++;

        return err;
}
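
/* Posting flow of mlx5e_alloc_rx_mpwqe(), as implemented above: when SHAMPO
 * is enabled, header buffers get their own KLM UMR first; then the prebuilt
 * UMR WQE template is copied in, one page is allocated per MTT entry, the
 * MTT tail is zero-padded up to the alignment promised in xlt_octowords, and
 * the WQE is accounted in wqe_info before sq->pc advances. The doorbell is
 * only armed here (doorbell_cseg); it is rung in bulk by the caller.
 */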

/* Deallocate the SHAMPO header buffer.
 * close == true means the RQ is being torn down: walk all @len entries and
 * free only those whose bit is still set in the bitmap (i.e. still in use).
 * close == false means the given range of entries is known to be no longer
 * in use, so free all of them unconditionally.
 */
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
{
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
        struct mlx5e_frag_page *deleted_page = NULL;
        int hd_per_wq = shampo->hd_per_wq;
        struct mlx5e_dma_info *hd_info;
        int i, index = start;

        for (i = 0; i < len; i++, index++) {
                if (index == hd_per_wq)
                        index = 0;

                if (close && !test_bit(index, shampo->bitmap))
                        continue;

                hd_info = &shampo->info[index];
                hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
                if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
                        deleted_page = hd_info->frag_page;
                        mlx5e_page_release_fragmented(rq, hd_info->frag_page);
                }

                hd_info->frag_page = NULL;
        }

        if (start + len > hd_per_wq) {
                len -= hd_per_wq - start;
                bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
                start = 0;
        }

        bitmap_clear(shampo->bitmap, start, len);
}

static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
        /* This function is called on rq/netdev close. */
        mlx5e_free_rx_mpwqe(rq, wi);

        /* Avoid a second release of the wqe pages: dealloc is called also
         * for missing wqes on an already flushed RQ.
         */
        bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}

INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        int wqe_bulk, count;
        bool busy = false;
        u16 head;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return false;

        if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
                return false;

        if (rq->page_pool)
                page_pool_nid_changed(rq->page_pool, numa_mem_id());

        wqe_bulk = mlx5_wq_cyc_missing(wq);
        head = mlx5_wq_cyc_get_head(wq);

        /* Don't allow any newly allocated WQEs to share the same page with old
         * WQEs that aren't completed yet. Stop earlier.
         */
        wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;

        if (!rq->xsk_pool) {
                count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
        } else if (likely(!rq->xsk_pool->dma_need_sync)) {
                mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
                count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
        } else {
                mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
                /* If dma_need_sync is true, it's more efficient to call
                 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
                 * because the latter does the same check and returns only one
                 * frame.
                 */
                count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
        }

        mlx5_wq_cyc_push_n(wq, count);
        if (unlikely(count != wqe_bulk)) {
                rq->stats->buff_alloc_err++;
                busy = true;
        }

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_cyc_update_db_record(wq);

        return busy;
}
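
/* mlx5e_post_rx_wqes() picks one of three refill paths: page_pool-backed
 * refill for regular RQs, batched xsk_buff allocation when the XSK pool
 * needs no DMA sync, and one-by-one xsk_buff_alloc otherwise. In all cases
 * the bulk is first trimmed with wqe_index_mask so that a newly allocated
 * WQE never lands on a page still owned by an uncompleted older WQE.
 */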

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
{
        u16 sqcc;

        sqcc = sq->cc;

        while (sqcc != sq->pc) {
                struct mlx5e_icosq_wqe_info *wi;
                u16 ci;

                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                wi = &sq->db.wqe_info[ci];
                sqcc += wi->num_wqebbs;
#ifdef CONFIG_MLX5_EN_TLS
                switch (wi->wqe_type) {
                case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
                        mlx5e_ktls_handle_ctx_completion(wi);
                        break;
                case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
                        mlx5e_ktls_handle_get_psv_completion(wi, sq);
                        break;
                }
#endif
        }
        sq->cc = sqcc;
}

static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
                                       struct mlx5e_icosq *sq)
{
        struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
        struct mlx5e_shampo_hd *shampo;
        /* assume 1:1 relationship between RQ and icosq */
        struct mlx5e_rq *rq = &c->rq;
        int end, from, len = umr.len;

        shampo = rq->mpwqe.shampo;
        end = shampo->hd_per_wq;
        from = shampo->ci;
        if (from + len > shampo->hd_per_wq) {
                len -= end - from;
                bitmap_set(shampo->bitmap, from, end - from);
                from = 0;
        }

        bitmap_set(shampo->bitmap, from, len);
        shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
}

int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
        struct mlx5_cqe64 *cqe;
        u16 sqcc;
        int i;

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return 0;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (likely(!cqe))
                return 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        i = 0;
        do {
                u16 wqe_counter;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_icosq_wqe_info *wi;
                        u16 ci;

                        last_wqe = (sqcc == wqe_counter);

                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];
                        sqcc += wi->num_wqebbs;

                        if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                                netdev_WARN_ONCE(cq->netdev,
                                                 "Bad OP in ICOSQ CQE: 0x%x\n",
                                                 get_cqe_opcode(cqe));
                                mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
                                                     (struct mlx5_err_cqe *)cqe);
                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
                                if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
                                        queue_work(cq->priv->wq, &sq->recover_work);
                                break;
                        }

                        switch (wi->wqe_type) {
                        case MLX5E_ICOSQ_WQE_UMR_RX:
                                wi->umr.rq->mpwqe.umr_completed++;
                                break;
                        case MLX5E_ICOSQ_WQE_NOP:
                                break;
                        case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
                                mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
                                break;
#ifdef CONFIG_MLX5_EN_TLS
                        case MLX5E_ICOSQ_WQE_UMR_TLS:
                                break;
                        case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
                                mlx5e_ktls_handle_ctx_completion(wi);
                                break;
                        case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
                                mlx5e_ktls_handle_get_psv_completion(wi, sq);
                                break;
#endif
                        default:
                                netdev_WARN_ONCE(cq->netdev,
                                                 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
                                                 wi->wqe_type);
                        }
                } while (!last_wqe);
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        sq->cc = sqcc;

        mlx5_cqwq_update_db_record(&cq->wq);

        return i;
}

INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
        u8  umr_completed = rq->mpwqe.umr_completed;
        struct mlx5e_icosq *sq = rq->icosq;
        int alloc_err = 0;
        u8  missing, i;
        u16 head;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return false;

        if (umr_completed) {
                mlx5e_post_rx_mpwqe(rq, umr_completed);
                rq->mpwqe.umr_in_progress -= umr_completed;
                rq->mpwqe.umr_completed = 0;
        }

        missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;

        if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
                rq->stats->congst_umr++;

        if (likely(missing < rq->mpwqe.min_wqe_bulk))
                return false;

        if (rq->page_pool)
                page_pool_nid_changed(rq->page_pool, numa_mem_id());

        head = rq->mpwqe.actual_wq_head;
        i = missing;
        do {
                struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);

                /* Deferred free for better page pool cache usage. */
                mlx5e_free_rx_mpwqe(rq, wi);

                alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
                                           mlx5e_alloc_rx_mpwqe(rq, head);

                if (unlikely(alloc_err))
                        break;
                head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
        } while (--i);

        rq->mpwqe.umr_last_bulk    = missing - i;
        if (sq->doorbell_cseg) {
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
                sq->doorbell_cseg = NULL;
        }

        rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
        rq->mpwqe.actual_wq_head   = head;

        /* If XSK Fill Ring doesn't have enough frames, report the error, so
         * that one of the actions can be performed:
         * 1. If need_wakeup is used, signal that the application has to kick
         * the driver when it refills the Fill Ring.
         * 2. Otherwise, busy poll by rescheduling the NAPI poll.
         */
        if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
                return true;

        return false;
}

static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
{
        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
                         (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

        tcp->check                      = 0;
        tcp->psh                        = get_cqe_lro_tcppsh(cqe);

        if (tcp_ack) {
                tcp->ack                = 1;
                tcp->ack_seq            = cqe->lro.ack_seq_num;
                tcp->window             = cqe->lro.tcp_win;
        }
}

static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
{
        struct ethhdr   *eth = (struct ethhdr *)(skb->data);
        struct tcphdr   *tcp;
        int network_depth = 0;
        __wsum check;
        __be16 proto;
        u16 tot_len;
        void *ip_p;

        proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

        tot_len = cqe_bcnt - network_depth;
        ip_p = skb->data + network_depth;

        if (proto == htons(ETH_P_IP)) {
                struct iphdr *ipv4 = ip_p;

                tcp = ip_p + sizeof(struct iphdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

                ipv4->ttl               = cqe->lro.min_ttl;
                ipv4->tot_len           = cpu_to_be16(tot_len);
                ipv4->check             = 0;
                ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
                                                       ipv4->ihl);

                mlx5e_lro_update_tcp_hdr(cqe, tcp);
                check = csum_partial(tcp, tcp->doff * 4,
                                     csum_unfold((__force __sum16)cqe->check_sum));
                /* Almost done, don't forget the pseudo header */
                tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
                                               tot_len - sizeof(struct iphdr),
                                               IPPROTO_TCP, check);
        } else {
                u16 payload_len = tot_len - sizeof(struct ipv6hdr);
                struct ipv6hdr *ipv6 = ip_p;

                tcp = ip_p + sizeof(struct ipv6hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

                ipv6->hop_limit         = cqe->lro.min_ttl;
                ipv6->payload_len       = cpu_to_be16(payload_len);

                mlx5e_lro_update_tcp_hdr(cqe, tcp);
                check = csum_partial(tcp, tcp->doff * 4,
                                     csum_unfold((__force __sum16)cqe->check_sum));
                /* Almost done, don't forget the pseudo header */
                tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
                                             IPPROTO_TCP, check);
        }
}
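
/* After LRO coalescing the headers still describe only the first segment, so
 * mlx5e_lro_update_hdr() above rewrites them for the merged super-frame: the
 * total/payload length and (for IPv4) the header checksum are recomputed,
 * the TCP PSH/ACK state is taken from the CQE, and the TCP checksum is
 * rebuilt from cqe->check_sum by folding in the TCP header and the
 * pseudo-header.
 */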

static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
        struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
        u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;

        return page_address(last_head->frag_page->page) + head_offset;
}

static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
{
        int udp_off = rq->hw_gro_data->fk.control.thoff;
        struct sk_buff *skb = rq->hw_gro_data->skb;
        struct udphdr *uh;

        uh = (struct udphdr *)(skb->data + udp_off);
        uh->len = htons(skb->len - udp_off);

        if (uh->check)
                uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
                                          ipv4->daddr, 0);

        skb->csum_start = (unsigned char *)uh - skb->head;
        skb->csum_offset = offsetof(struct udphdr, check);

        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
}

static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
{
        int udp_off = rq->hw_gro_data->fk.control.thoff;
        struct sk_buff *skb = rq->hw_gro_data->skb;
        struct udphdr *uh;

        uh = (struct udphdr *)(skb->data + udp_off);
        uh->len = htons(skb->len - udp_off);

        if (uh->check)
                uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
                                          &ipv6->daddr, 0);

        skb->csum_start = (unsigned char *)uh - skb->head;
        skb->csum_offset = offsetof(struct udphdr, check);

        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
}

static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                                              struct tcphdr *skb_tcp_hd)
{
        u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
        struct tcphdr *last_tcp_hd;
        void *last_hd_addr;

        last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
        last_tcp_hd =  last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
        tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
}

static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
                                             struct mlx5_cqe64 *cqe, bool match)
{
        int tcp_off = rq->hw_gro_data->fk.control.thoff;
        struct sk_buff *skb = rq->hw_gro_data->skb;
        struct tcphdr *tcp;

        tcp = (struct tcphdr *)(skb->data + tcp_off);
        if (match)
                mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);

        tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
                                   ipv4->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

        skb->csum_start = (unsigned char *)tcp - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);

        if (tcp->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
}

static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
                                             struct mlx5_cqe64 *cqe, bool match)
{
        int tcp_off = rq->hw_gro_data->fk.control.thoff;
        struct sk_buff *skb = rq->hw_gro_data->skb;
        struct tcphdr *tcp;

        tcp = (struct tcphdr *)(skb->data + tcp_off);
        if (match)
                mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);

        tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
                                   &ipv6->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
        skb->csum_start = (unsigned char *)tcp - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);

        if (tcp->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
}

static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
        bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
        struct sk_buff *skb = rq->hw_gro_data->skb;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
        skb->ip_summed = CHECKSUM_PARTIAL;

        if (is_ipv4) {
                int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
                struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
                __be16 newlen = htons(skb->len - nhoff);

                csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
                ipv4->tot_len = newlen;

                if (ipv4->protocol == IPPROTO_TCP)
                        mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
                else
                        mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
        } else {
                int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
                struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);

                ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));

                if (ipv6->nexthdr == IPPROTO_TCP)
                        mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
                else
                        mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
        }
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
                                        __be16 *proto)
{
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);

        if (*proto == htons(ETH_P_IP))
                return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));

        if (*proto == htons(ETH_P_IPV6))
                return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));

        return false;
}

static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
        int network_depth = 0;
        __be16 proto;
        void *ip;
        int rc;

        if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
                return;

        ip = skb->data + network_depth;
        rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
                                         IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));

        rq->stats->ecn_mark += !!rc;
}

static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
        void *ip_p = skb->data + network_depth;

        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
}

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

#define MAX_PADDING 8

static void
tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
                       struct mlx5e_rq_stats *stats)
{
        stats->csum_complete_tail_slow++;
        skb->csum = csum_block_add(skb->csum,
                                   skb_checksum(skb, offset, len, 0),
                                   offset);
}

static void
tail_padding_csum(struct sk_buff *skb, int offset,
                  struct mlx5e_rq_stats *stats)
{
        u8 tail_padding[MAX_PADDING];
        int len = skb->len - offset;
        void *tail;

        if (unlikely(len > MAX_PADDING)) {
                tail_padding_csum_slow(skb, offset, len, stats);
                return;
        }

        tail = skb_header_pointer(skb, offset, len, tail_padding);
        if (unlikely(!tail)) {
                tail_padding_csum_slow(skb, offset, len, stats);
                return;
        }

        stats->csum_complete_tail++;
        skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
}
1421
1422 static void
1423 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1424                      struct mlx5e_rq_stats *stats)
1425 {
1426         struct ipv6hdr *ip6;
1427         struct iphdr   *ip4;
1428         int pkt_len;
1429
1430         /* Fixup vlan headers, if any */
1431         if (network_depth > ETH_HLEN)
1432                 /* The CQE csum is calculated from the IP header and does
1433                  * not cover VLAN headers, if present. Add the checksum of
1434                  * the VLAN headers manually here.
1435                  */
1436                 skb->csum = csum_partial(skb->data + ETH_HLEN,
1437                                          network_depth - ETH_HLEN,
1438                                          skb->csum);
1439
1440         /* Fixup tail padding, if any */
1441         switch (proto) {
1442         case htons(ETH_P_IP):
1443                 ip4 = (struct iphdr *)(skb->data + network_depth);
1444                 pkt_len = network_depth + ntohs(ip4->tot_len);
1445                 break;
1446         case htons(ETH_P_IPV6):
1447                 ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1448                 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1449                 break;
1450         default:
1451                 return;
1452         }
1453
1454         if (likely(pkt_len >= skb->len))
1455                 return;
1456
1457         tail_padding_csum(skb, pkt_len, stats);
1458 }
1459
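     /* Derive the skb checksum status from the CQE. Preference order:
      * CHECKSUM_COMPLETE built from the HW checksum (with VLAN and tail
      * padding fixups when needed), then CHECKSUM_UNNECESSARY when the
      * CQE L3_OK/L4_OK bits are set, and CHECKSUM_NONE otherwise.
      */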
1460 static inline void mlx5e_handle_csum(struct net_device *netdev,
1461                                      struct mlx5_cqe64 *cqe,
1462                                      struct mlx5e_rq *rq,
1463                                      struct sk_buff *skb,
1464                                      bool   lro)
1465 {
1466         struct mlx5e_rq_stats *stats = rq->stats;
1467         int network_depth = 0;
1468         __be16 proto;
1469
1470         if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1471                 goto csum_none;
1472
1473         if (lro) {
1474                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1475                 stats->csum_unnecessary++;
1476                 return;
1477         }
1478
1479         /* True when explicitly set via the priv flag, or when an XDP prog is loaded */
1480         if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1481             get_cqe_tls_offload(cqe))
1482                 goto csum_unnecessary;
1483
1484         /* The CQE csum doesn't cover padding octets in short ethernet
1485          * frames, because the pad bytes are appended before the FCS
1486          * field is calculated and appended.
1487          *
1488          * Detecting such padded frames requires verifying and parsing
1489          * the IP headers, so simply force all these small frames to
1490          * CHECKSUM_UNNECESSARY even if they are not padded.
1491          */
1492         if (short_frame(skb->len))
1493                 goto csum_unnecessary;
1494
1495         if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1496                 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1497                         goto csum_unnecessary;
1498
1499                 stats->csum_complete++;
1500                 skb->ip_summed = CHECKSUM_COMPLETE;
1501                 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1502
1503                 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1504                         return; /* CQE csum covers all received bytes */
1505
1506                 /* csum might still need fixups: VLAN headers and/or tail padding */
1507                 mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1508                 return;
1509         }
1510
1511 csum_unnecessary:
1512         if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1513                    (cqe->hds_ip_ext & CQE_L4_OK))) {
1514                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1515                 if (cqe_is_tunneled(cqe)) {
1516                         skb->csum_level = 1;
1517                         skb->encapsulation = 1;
1518                         stats->csum_unnecessary_inner++;
1519                         return;
1520                 }
1521                 stats->csum_unnecessary++;
1522                 return;
1523         }
1524 csum_none:
1525         skb->ip_summed = CHECKSUM_NONE;
1526         stats->csum_none++;
1527 }
1528
1529 #define MLX5E_CE_BIT_MASK 0x80
1530
1531 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1532                                       u32 cqe_bcnt,
1533                                       struct mlx5e_rq *rq,
1534                                       struct sk_buff *skb)
1535 {
1536         u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1537         struct mlx5e_rq_stats *stats = rq->stats;
1538         struct net_device *netdev = rq->netdev;
1539
1540         skb->mac_len = ETH_HLEN;
1541
1542         if (unlikely(get_cqe_tls_offload(cqe)))
1543                 mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1544
1545         if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1546                 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
1547                                                   be32_to_cpu(cqe->ft_metadata));
1548
1549         if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1550                 mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1551
1552         if (lro_num_seg > 1) {
1553                 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1554                 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
1555                 /* Subtract one since we already counted this as one
1556                  * "regular" packet in mlx5e_complete_rx_cqe()
1557                  */
1558                 stats->packets += lro_num_seg - 1;
1559                 stats->lro_packets++;
1560                 stats->lro_bytes += cqe_bcnt;
1561         }
1562
1563         if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1564                 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1565                                                                   rq->clock, get_cqe_ts(cqe));
1566         skb_record_rx_queue(skb, rq->ix);
1567
1568         if (likely(netdev->features & NETIF_F_RXHASH))
1569                 mlx5e_skb_set_hash(cqe, skb);
1570
1571         if (cqe_has_vlan(cqe)) {
1572                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1573                                        be16_to_cpu(cqe->vlan_info));
1574                 stats->removed_vlan_packets++;
1575         }
1576
1577         skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1578
1579         mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1580         /* checking CE bit in cqe - MSB in ml_path field */
1581         if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1582                 mlx5e_enable_ecn(rq, skb);
1583
1584         skb->protocol = eth_type_trans(skb, netdev);
1585
1586         if (unlikely(mlx5e_skb_is_multicast(skb)))
1587                 stats->mcast_packets++;
1588 }
1589
1590 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1591                                          struct mlx5_cqe64 *cqe,
1592                                          u32 cqe_bcnt,
1593                                          struct sk_buff *skb)
1594 {
1595         struct mlx5e_rq_stats *stats = rq->stats;
1596
1597         stats->packets++;
1598         stats->gro_packets++;
1599         stats->bytes += cqe_bcnt;
1600         stats->gro_bytes += cqe_bcnt;
1601         if (NAPI_GRO_CB(skb)->count != 1)
1602                 return;
1603         mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1604         skb_reset_network_header(skb);
1605         if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1606                 napi_gro_receive(rq->cq.napi, skb);
1607                 rq->hw_gro_data->skb = NULL;
1608         }
1609 }
1610
1611 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1612                                          struct mlx5_cqe64 *cqe,
1613                                          u32 cqe_bcnt,
1614                                          struct sk_buff *skb)
1615 {
1616         struct mlx5e_rq_stats *stats = rq->stats;
1617
1618         stats->packets++;
1619         stats->bytes += cqe_bcnt;
1620         mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1621 }
1622
1623 static inline
1624 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1625                                        u32 frag_size, u16 headroom,
1626                                        u32 cqe_bcnt, u32 metasize)
1627 {
1628         struct sk_buff *skb = napi_build_skb(va, frag_size);
1629
1630         if (unlikely(!skb)) {
1631                 rq->stats->buff_alloc_err++;
1632                 return NULL;
1633         }
1634
1635         skb_reserve(skb, headroom);
1636         skb_put(skb, cqe_bcnt);
1637
1638         if (metasize)
1639                 skb_metadata_set(skb, metasize);
1640
1641         return skb;
1642 }
1643
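     /* Wrap the received buffer in the driver's xdp_buff so an attached
      * XDP program can run on it; the CQE and RQ pointers are stashed for
      * the XDP helpers that need them (e.g. RX metadata).
      */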
1644 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1645                              void *va, u16 headroom, u32 frame_sz, u32 len,
1646                              struct mlx5e_xdp_buff *mxbuf)
1647 {
1648         xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1649         xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1650         mxbuf->cqe = cqe;
1651         mxbuf->rq = rq;
1652 }
1653
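     /* Linear (single-fragment) RX path: run XDP if a program is attached,
      * then build the skb zero-copy over the receive buffer. XDP may have
      * moved data/data_end/data_meta, so headroom, length and metasize are
      * re-derived from the xdp_buff afterwards.
      */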
1654 static struct sk_buff *
1655 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1656                           struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1657 {
1658         struct mlx5e_frag_page *frag_page = wi->frag_page;
1659         u16 rx_headroom = rq->buff.headroom;
1660         struct bpf_prog *prog;
1661         struct sk_buff *skb;
1662         u32 metasize = 0;
1663         void *va, *data;
1664         dma_addr_t addr;
1665         u32 frag_size;
1666
1667         va             = page_address(frag_page->page) + wi->offset;
1668         data           = va + rx_headroom;
1669         frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1670
1671         addr = page_pool_get_dma_addr(frag_page->page);
1672         dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1673                                       frag_size, rq->buff.map_dir);
1674         net_prefetch(data);
1675
1676         prog = rcu_dereference(rq->xdp_prog);
1677         if (prog) {
1678                 struct mlx5e_xdp_buff mxbuf;
1679
1680                 net_prefetchw(va); /* xdp_frame data area */
1681                 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1682                                  cqe_bcnt, &mxbuf);
1683                 if (mlx5e_xdp_handle(rq, prog, &mxbuf))
1684                         return NULL; /* page/packet was consumed by XDP */
1685
1686                 rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
1687                 metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
1688                 cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
1689         }
1690         frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1691         skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1692         if (unlikely(!skb))
1693                 return NULL;
1694
1695         /* queue up for recycling/reuse */
1696         skb_mark_for_recycle(skb);
1697         frag_page->frags++;
1698
1699         return skb;
1700 }
1701
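     /* Multi-fragment RX path: the first fragment becomes the linear part
      * of an xdp_buff and the remaining fragments are attached as
      * shared-info frags, so a multi-buffer XDP program sees the whole
      * packet. Page references (frag_page->frags) are bumped for the
      * fragments the skb keeps, or for all of them when XDP_TX/REDIRECT
      * took ownership.
      */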
1702 static struct sk_buff *
1703 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1704                              struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1705 {
1706         struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1707         struct mlx5e_wqe_frag_info *head_wi = wi;
1708         u16 rx_headroom = rq->buff.headroom;
1709         struct mlx5e_frag_page *frag_page;
1710         struct skb_shared_info *sinfo;
1711         struct mlx5e_xdp_buff mxbuf;
1712         u32 frag_consumed_bytes;
1713         struct bpf_prog *prog;
1714         struct sk_buff *skb;
1715         dma_addr_t addr;
1716         u32 truesize;
1717         void *va;
1718
1719         frag_page = wi->frag_page;
1720
1721         va = page_address(frag_page->page) + wi->offset;
1722         frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1723
1724         addr = page_pool_get_dma_addr(frag_page->page);
1725         dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1726                                       rq->buff.frame0_sz, rq->buff.map_dir);
1727         net_prefetchw(va); /* xdp_frame data area */
1728         net_prefetch(va + rx_headroom);
1729
1730         mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1731                          frag_consumed_bytes, &mxbuf);
1732         sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
1733         truesize = 0;
1734
1735         cqe_bcnt -= frag_consumed_bytes;
1736         frag_info++;
1737         wi++;
1738
1739         while (cqe_bcnt) {
1740                 frag_page = wi->frag_page;
1741
1742                 frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1743
1744                 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
1745                                                wi->offset, frag_consumed_bytes);
1746                 truesize += frag_info->frag_stride;
1747
1748                 cqe_bcnt -= frag_consumed_bytes;
1749                 frag_info++;
1750                 wi++;
1751         }
1752
1753         prog = rcu_dereference(rq->xdp_prog);
1754         if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
1755                 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1756                         struct mlx5e_wqe_frag_info *pwi;
1757
1758                         for (pwi = head_wi; pwi < wi; pwi++)
1759                                 pwi->frag_page->frags++;
1760                 }
1761                 return NULL; /* page/packet was consumed by XDP */
1762         }
1763
1764         skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
1765                                      mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
1766                                      mxbuf.xdp.data_end - mxbuf.xdp.data,
1767                                      mxbuf.xdp.data - mxbuf.xdp.data_meta);
1768         if (unlikely(!skb))
1769                 return NULL;
1770
1771         skb_mark_for_recycle(skb);
1772         head_wi->frag_page->frags++;
1773
1774         if (xdp_buff_has_frags(&mxbuf.xdp)) {
1775                 /* sinfo->nr_frags is reset by build_skb, calculate again. */
1776                 xdp_update_skb_shared_info(skb, wi - head_wi - 1,
1777                                            sinfo->xdp_frags_size, truesize,
1778                                            xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
1779
1780                 for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1781                         pwi->frag_page->frags++;
1782         }
1783
1784         return skb;
1785 }
1786
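     /* For error CQEs whose syndrome indicates the queue needs recovery,
      * dump the CQE and schedule the RQ recovery work. The RECOVERING bit
      * makes sure recovery is queued only once at a time.
      */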
1787 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1788 {
1789         struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1790         struct mlx5e_priv *priv = rq->priv;
1791
1792         if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1793             !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1794                 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1795                 queue_work(priv->wq, &rq->recover_work);
1796         }
1797 }
1798
1799 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1800 {
1801         trigger_report(rq, cqe);
1802         rq->stats->wqe_err++;
1803 }
1804
1805 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1806 {
1807         struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1808         struct mlx5e_wqe_frag_info *wi;
1809         struct sk_buff *skb;
1810         u32 cqe_bcnt;
1811         u16 ci;
1812
1813         ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1814         wi       = get_frag(rq, ci);
1815         cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1816
1817         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1818                 mlx5e_handle_rx_err_cqe(rq, cqe);
1819                 goto wq_cyc_pop;
1820         }
1821
1822         skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1823                               mlx5e_skb_from_cqe_linear,
1824                               mlx5e_skb_from_cqe_nonlinear,
1825                               mlx5e_xsk_skb_from_cqe_linear,
1826                               rq, wi, cqe, cqe_bcnt);
1827         if (!skb) {
1828                 /* no skb: most likely consumed by XDP, possibly an allocation failure */
1829                 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1830                         wi->frag_page->frags++;
1831                 goto wq_cyc_pop;
1832         }
1833
1834         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1835
1836         if (mlx5e_cqe_regb_chain(cqe))
1837                 if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1838                         dev_kfree_skb_any(skb);
1839                         goto wq_cyc_pop;
1840                 }
1841
1842         napi_gro_receive(rq->cq.napi, skb);
1843
1844 wq_cyc_pop:
1845         mlx5_wq_cyc_pop(wq);
1846 }
1847
1848 #ifdef CONFIG_MLX5_ESWITCH
1849 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1850 {
1851         struct net_device *netdev = rq->netdev;
1852         struct mlx5e_priv *priv = netdev_priv(netdev);
1853         struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1854         struct mlx5_eswitch_rep *rep = rpriv->rep;
1855         struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1856         struct mlx5e_wqe_frag_info *wi;
1857         struct sk_buff *skb;
1858         u32 cqe_bcnt;
1859         u16 ci;
1860
1861         ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1862         wi       = get_frag(rq, ci);
1863         cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1864
1865         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1866                 mlx5e_handle_rx_err_cqe(rq, cqe);
1867                 goto wq_cyc_pop;
1868         }
1869
1870         skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1871                               mlx5e_skb_from_cqe_linear,
1872                               mlx5e_skb_from_cqe_nonlinear,
1873                               rq, wi, cqe, cqe_bcnt);
1874         if (!skb) {
1875                 /* no skb: most likely consumed by XDP, possibly an allocation failure */
1876                 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1877                         wi->frag_page->frags++;
1878                 goto wq_cyc_pop;
1879         }
1880
1881         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1882
1883         if (rep->vlan && skb_vlan_tag_present(skb))
1884                 skb_vlan_pop(skb);
1885
1886         mlx5e_rep_tc_receive(cqe, rq, skb);
1887
1888 wq_cyc_pop:
1889         mlx5_wq_cyc_pop(wq);
1890 }
1891
1892 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1893 {
1894         u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1895         u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1896         struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1897         u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1898         u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1899         u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1900         u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
1901         struct mlx5e_rx_wqe_ll *wqe;
1902         struct mlx5_wq_ll *wq;
1903         struct sk_buff *skb;
1904         u16 cqe_bcnt;
1905
1906         wi->consumed_strides += cstrides;
1907
1908         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1909                 mlx5e_handle_rx_err_cqe(rq, cqe);
1910                 goto mpwrq_cqe_out;
1911         }
1912
1913         if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1914                 struct mlx5e_rq_stats *stats = rq->stats;
1915
1916                 stats->mpwqe_filler_cqes++;
1917                 stats->mpwqe_filler_strides += cstrides;
1918                 goto mpwrq_cqe_out;
1919         }
1920
1921         cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1922
1923         skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1924                               mlx5e_skb_from_cqe_mpwrq_linear,
1925                               mlx5e_skb_from_cqe_mpwrq_nonlinear,
1926                               rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1927         if (!skb)
1928                 goto mpwrq_cqe_out;
1929
1930         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1931
1932         mlx5e_rep_tc_receive(cqe, rq, skb);
1933
1934 mpwrq_cqe_out:
1935         if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1936                 return;
1937
1938         wq  = &rq->mpwqe.wq;
1939         wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1940         mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1941 }
1942
1943 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1944         .handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1945         .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1946 };
1947 #endif
1948
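     /* Attach the packet payload to the skb page by page. The truesize is
      * exact for SHAMPO, and otherwise rounded up to whole strides (e.g. a
      * 1000B fragment with 2KB strides accounts 2048B), matching what the
      * packet actually occupies in the MPWQE buffer.
      */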
1949 static void
1950 mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1951                     struct mlx5e_frag_page *frag_page,
1952                     u32 data_bcnt, u32 data_offset)
1953 {
1954         net_prefetchw(skb->data);
1955
1956         while (data_bcnt) {
1957                 /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1958                 u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
1959                 unsigned int truesize;
1960
1961                 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1962                         truesize = pg_consumed_bytes;
1963                 else
1964                         truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1965
1966                 frag_page->frags++;
1967                 mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
1968                                    pg_consumed_bytes, truesize);
1969
1970                 data_bcnt -= pg_consumed_bytes;
1971                 data_offset = 0;
1972                 frag_page++;
1973         }
1974 }
1975
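     /* Non-linear MPWQE RX path. With XDP attached, a separate page is
      * allocated for the xdp_buff linear area (the region used by
      * bpf_xdp_load_bytes()/bpf_xdp_store_bytes()) and all payload pages
      * become frags; without XDP, the headers are copied into a freshly
      * allocated skb and the payload pages are attached as frags.
      */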
1976 static struct sk_buff *
1977 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1978                                    struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
1979                                    u32 page_idx)
1980 {
1981         struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
1982         u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1983         struct mlx5e_frag_page *head_page = frag_page;
1984         u32 frag_offset    = head_offset;
1985         u32 byte_cnt       = cqe_bcnt;
1986         struct skb_shared_info *sinfo;
1987         struct mlx5e_xdp_buff mxbuf;
1988         unsigned int truesize = 0;
1989         struct bpf_prog *prog;
1990         struct sk_buff *skb;
1991         u32 linear_frame_sz;
1992         u16 linear_data_len;
1993         u16 linear_hr;
1994         void *va;
1995
1996         prog = rcu_dereference(rq->xdp_prog);
1997
1998         if (prog) {
1999                 /* area for bpf_xdp_[store|load]_bytes */
2000                 net_prefetchw(page_address(frag_page->page) + frag_offset);
2001                 if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
2002                         rq->stats->buff_alloc_err++;
2003                         return NULL;
2004                 }
2005                 va = page_address(wi->linear_page.page);
2006                 net_prefetchw(va); /* xdp_frame data area */
2007                 linear_hr = XDP_PACKET_HEADROOM;
2008                 linear_data_len = 0;
2009                 linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
2010         } else {
2011                 skb = napi_alloc_skb(rq->cq.napi,
2012                                      ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
2013                 if (unlikely(!skb)) {
2014                         rq->stats->buff_alloc_err++;
2015                         return NULL;
2016                 }
2017                 skb_mark_for_recycle(skb);
2018                 va = skb->head;
2019                 net_prefetchw(va); /* xdp_frame data area */
2020                 net_prefetchw(skb->data);
2021
2022                 frag_offset += headlen;
2023                 byte_cnt -= headlen;
2024                 linear_hr = skb_headroom(skb);
2025                 linear_data_len = headlen;
2026                 linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
2027                 if (unlikely(frag_offset >= PAGE_SIZE)) {
2028                         frag_page++;
2029                         frag_offset -= PAGE_SIZE;
2030                 }
2031         }
2032
2033         mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
2034
2035         sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
2036
2037         while (byte_cnt) {
2038                 /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
2039                 u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
2040
2041                 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
2042                         truesize += pg_consumed_bytes;
2043                 else
2044                         truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
2045
2046                 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
2047                                                pg_consumed_bytes);
2048                 byte_cnt -= pg_consumed_bytes;
2049                 frag_offset = 0;
2050                 frag_page++;
2051         }
2052
2053         if (prog) {
2054                 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2055                         if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2056                                 struct mlx5e_frag_page *pfp;
2057
2058                                 for (pfp = head_page; pfp < frag_page; pfp++)
2059                                         pfp->frags++;
2060
2061                                 wi->linear_page.frags++;
2062                         }
2063                         mlx5e_page_release_fragmented(rq, &wi->linear_page);
2064                         return NULL; /* page/packet was consumed by XDP */
2065                 }
2066
2067                 skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
2068                                              linear_frame_sz,
2069                                              mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
2070                                              mxbuf.xdp.data - mxbuf.xdp.data_meta);
2071                 if (unlikely(!skb)) {
2072                         mlx5e_page_release_fragmented(rq, &wi->linear_page);
2073                         return NULL;
2074                 }
2075
2076                 skb_mark_for_recycle(skb);
2077                 wi->linear_page.frags++;
2078                 mlx5e_page_release_fragmented(rq, &wi->linear_page);
2079
2080                 if (xdp_buff_has_frags(&mxbuf.xdp)) {
2081                         struct mlx5e_frag_page *pagep;
2082
2083                         /* sinfo->nr_frags is reset by build_skb, calculate again. */
2084                         xdp_update_skb_shared_info(skb, frag_page - head_page,
2085                                                    sinfo->xdp_frags_size, truesize,
2086                                                    xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
2087
2088                         pagep = head_page;
2089                         do
2090                                 pagep->frags++;
2091                         while (++pagep < frag_page);
2092                 }
2093                 __pskb_pull_tail(skb, headlen);
2094         } else {
2095                 dma_addr_t addr;
2096
2097                 if (xdp_buff_has_frags(&mxbuf.xdp)) {
2098                         struct mlx5e_frag_page *pagep;
2099
2100                         xdp_update_skb_shared_info(skb, sinfo->nr_frags,
2101                                                    sinfo->xdp_frags_size, truesize,
2102                                                    xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
2103
2104                         pagep = frag_page - sinfo->nr_frags;
2105                         do
2106                                 pagep->frags++;
2107                         while (++pagep < frag_page);
2108                 }
2109                 /* copy header */
2110                 addr = page_pool_get_dma_addr(head_page->page);
2111                 mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
2112                                       head_offset, head_offset, headlen);
2113                 /* skb linear part was allocated with headlen and aligned to long */
2114                 skb->tail += headlen;
2115                 skb->len  += headlen;
2116         }
2117
2118         return skb;
2119 }
2120
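     /* Linear MPWQE RX path: the whole packet fits in a single stride, so
      * the skb is built zero-copy over it, optionally after running XDP.
      * Packets larger than the MTU are counted and dropped.
      */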
2121 static struct sk_buff *
2122 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2123                                 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2124                                 u32 page_idx)
2125 {
2126         struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2127         u16 rx_headroom = rq->buff.headroom;
2128         struct bpf_prog *prog;
2129         struct sk_buff *skb;
2130         u32 metasize = 0;
2131         void *va, *data;
2132         dma_addr_t addr;
2133         u32 frag_size;
2134
2135         /* Check packet size. Note LRO doesn't use linear SKB */
2136         if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2137                 rq->stats->oversize_pkts_sw_drop++;
2138                 return NULL;
2139         }
2140
2141         va             = page_address(frag_page->page) + head_offset;
2142         data           = va + rx_headroom;
2143         frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2144
2145         addr = page_pool_get_dma_addr(frag_page->page);
2146         dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2147                                       frag_size, rq->buff.map_dir);
2148         net_prefetch(data);
2149
2150         prog = rcu_dereference(rq->xdp_prog);
2151         if (prog) {
2152                 struct mlx5e_xdp_buff mxbuf;
2153
2154                 net_prefetchw(va); /* xdp_frame data area */
2155                 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2156                                  cqe_bcnt, &mxbuf);
2157                 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2158                         if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2159                                 frag_page->frags++;
2160                         return NULL; /* page/packet was consumed by XDP */
2161                 }
2162
2163                 rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
2164                 metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
2165                 cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
2166         }
2167         frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2168         skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2169         if (unlikely(!skb))
2170                 return NULL;
2171
2172         /* queue up for recycling/reuse */
2173         skb_mark_for_recycle(skb);
2174         frag_page->frags++;
2175
2176         return skb;
2177 }
2178
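     /* Build an skb around a SHAMPO (header/data split) header entry.
      * Headers that fit a header entry are wrapped zero-copy in place;
      * larger headers are copied into a freshly allocated skb.
      */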
2179 static struct sk_buff *
2180 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2181                           struct mlx5_cqe64 *cqe, u16 header_index)
2182 {
2183         struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
2184         u16 head_offset = head->addr & (PAGE_SIZE - 1);
2185         u16 head_size = cqe->shampo.header_size;
2186         u16 rx_headroom = rq->buff.headroom;
2187         struct sk_buff *skb = NULL;
2188         void *hdr, *data;
2189         u32 frag_size;
2190
2191         hdr             = page_address(head->frag_page->page) + head_offset;
2192         data            = hdr + rx_headroom;
2193         frag_size       = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
2194
2195         if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
2196                 /* build SKB around header */
2197                 dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
2198                 prefetchw(hdr);
2199                 prefetch(data);
2200                 skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
2201
2202                 if (unlikely(!skb))
2203                         return NULL;
2204
2205                 head->frag_page->frags++;
2206         } else {
2207                 /* allocate SKB and copy header for large header */
2208                 rq->stats->gro_large_hds++;
2209                 skb = napi_alloc_skb(rq->cq.napi,
2210                                      ALIGN(head_size, sizeof(long)));
2211                 if (unlikely(!skb)) {
2212                         rq->stats->buff_alloc_err++;
2213                         return NULL;
2214                 }
2215
2216                 prefetchw(skb->data);
2217                 mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
2218                                       head_offset + rx_headroom,
2219                                       rx_headroom, head_size);
2220                 /* skb linear part was allocated with head_size and aligned to long */
2221                 skb->tail += head_size;
2222                 skb->len  += head_size;
2223         }
2224
2225         /* queue up for recycling/reuse */
2226         skb_mark_for_recycle(skb);
2227
2228         return skb;
2229 }
2230
2231 static void
2232 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2233 {
2234         skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2235         unsigned int frag_size = skb_frag_size(last_frag);
2236         unsigned int frag_truesize;
2237
2238         frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2239         skb->truesize += frag_truesize - frag_size;
2240 }
2241
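     /* Hand the currently aggregated HW-GRO skb to the stack. When more
      * than one segment was aggregated, the IP and transport headers are
      * first patched to describe the merged super-packet.
      */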
2242 static void
2243 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2244 {
2245         struct sk_buff *skb = rq->hw_gro_data->skb;
2246         struct mlx5e_rq_stats *stats = rq->stats;
2247
2248         stats->gro_skbs++;
2249         if (likely(skb_shinfo(skb)->nr_frags))
2250                 mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2251         if (NAPI_GRO_CB(skb)->count > 1)
2252                 mlx5e_shampo_update_hdr(rq, cqe, match);
2253         napi_gro_receive(rq->cq.napi, skb);
2254         rq->hw_gro_data->skb = NULL;
2255 }
2256
2257 static bool
2258 mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
2259 {
2260         int nr_frags = skb_shinfo(skb)->nr_frags;
2261
2262         return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2263 }
2264
2265 static void
2266 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
2267 {
2268         struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
2269         u64 addr = shampo->info[header_index].addr;
2270
2271         if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
2272                 struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
2273
2274                 dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
2275                 mlx5e_page_release_fragmented(rq, dma_info->frag_page);
2276         }
2277         bitmap_clear(shampo->bitmap, header_index, 1);
2278 }
2279
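     /* SHAMPO CQE handler. The HW splits each packet into a header entry
      * and a data region, and sets 'match' when the packet continues the
      * current GRO session. Matching packets are appended to the session
      * skb; a mismatch or the 'flush' hint closes the session and pushes
      * the aggregate up the stack.
      */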
2280 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2281 {
2282         u16 data_bcnt           = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2283         u16 header_index        = mlx5e_shampo_get_cqe_header_index(rq, cqe);
2284         u32 wqe_offset          = be32_to_cpu(cqe->shampo.data_offset);
2285         u16 cstrides            = mpwrq_get_cqe_consumed_strides(cqe);
2286         u32 data_offset         = wqe_offset & (PAGE_SIZE - 1);
2287         u32 cqe_bcnt            = mpwrq_get_cqe_byte_cnt(cqe);
2288         u16 wqe_id              = be16_to_cpu(cqe->wqe_id);
2289         u32 page_idx            = wqe_offset >> PAGE_SHIFT;
2290         u16 head_size           = cqe->shampo.header_size;
2291         struct sk_buff **skb    = &rq->hw_gro_data->skb;
2292         bool flush              = cqe->shampo.flush;
2293         bool match              = cqe->shampo.match;
2294         struct mlx5e_rq_stats *stats = rq->stats;
2295         struct mlx5e_rx_wqe_ll *wqe;
2296         struct mlx5e_mpw_info *wi;
2297         struct mlx5_wq_ll *wq;
2298
2299         wi = mlx5e_get_mpw_info(rq, wqe_id);
2300         wi->consumed_strides += cstrides;
2301
2302         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2303                 mlx5e_handle_rx_err_cqe(rq, cqe);
2304                 goto mpwrq_cqe_out;
2305         }
2306
2307         if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2308                 stats->mpwqe_filler_cqes++;
2309                 stats->mpwqe_filler_strides += cstrides;
2310                 goto mpwrq_cqe_out;
2311         }
2312
2313         stats->gro_match_packets += match;
2314
2315         if (*skb && (!match || !mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt))) {
2316                 match = false;
2317                 mlx5e_shampo_flush_skb(rq, cqe, match);
2318         }
2319
2320         if (!*skb) {
2321                 if (likely(head_size))
2322                         *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2323                 else
2324                         *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
2325                                                                   data_offset, page_idx);
2326                 if (unlikely(!*skb))
2327                         goto free_hd_entry;
2328
2329                 NAPI_GRO_CB(*skb)->count = 1;
2330                 skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2331         } else {
2332                 NAPI_GRO_CB(*skb)->count++;
2333                 if (NAPI_GRO_CB(*skb)->count == 2 &&
2334                     rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2335                         void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
2336                         int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
2337                                     sizeof(struct iphdr);
2338                         struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
2339
2340                         rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2341                 }
2342         }
2343
2344         if (likely(head_size)) {
2345                 struct mlx5e_frag_page *frag_page;
2346
2347                 frag_page = &wi->alloc_units.frag_pages[page_idx];
2348                 mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2349         }
2350
2351         mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
2352         if (flush)
2353                 mlx5e_shampo_flush_skb(rq, cqe, match);
2354 free_hd_entry:
2355         mlx5e_free_rx_shampo_hd_entry(rq, header_index);
2356 mpwrq_cqe_out:
2357         if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2358                 return;
2359
2360         wq  = &rq->mpwqe.wq;
2361         wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2362         mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2363 }
2364
2365 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2366 {
2367         u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
2368         u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
2369         struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2370         u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
2371         u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
2372         u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2373         u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
2374         struct mlx5e_rx_wqe_ll *wqe;
2375         struct mlx5_wq_ll *wq;
2376         struct sk_buff *skb;
2377         u16 cqe_bcnt;
2378
2379         wi->consumed_strides += cstrides;
2380
2381         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2382                 mlx5e_handle_rx_err_cqe(rq, cqe);
2383                 goto mpwrq_cqe_out;
2384         }
2385
2386         if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2387                 struct mlx5e_rq_stats *stats = rq->stats;
2388
2389                 stats->mpwqe_filler_cqes++;
2390                 stats->mpwqe_filler_strides += cstrides;
2391                 goto mpwrq_cqe_out;
2392         }
2393
2394         cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2395
2396         skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2397                               mlx5e_skb_from_cqe_mpwrq_linear,
2398                               mlx5e_skb_from_cqe_mpwrq_nonlinear,
2399                               mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2400                               rq, wi, cqe, cqe_bcnt, head_offset,
2401                               page_idx);
2402         if (!skb)
2403                 goto mpwrq_cqe_out;
2404
2405         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2406
2407         if (mlx5e_cqe_regb_chain(cqe))
2408                 if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2409                         dev_kfree_skb_any(skb);
2410                         goto mpwrq_cqe_out;
2411                 }
2412
2413         napi_gro_receive(rq->cq.napi, skb);
2414
2415 mpwrq_cqe_out:
2416         if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2417                 return;
2418
2419         wq  = &rq->mpwqe.wq;
2420         wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2421         mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2422 }
2423
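     /* Poll loop for enhanced CQE compression: compressed blocks carry
      * mini-CQEs that are expanded against the most recent title CQE,
      * which may have been read in a previous poll (cqd->last_cqe_title).
      */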
2424 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2425                                                  struct mlx5_cqwq *cqwq,
2426                                                  int budget_rem)
2427 {
2428         struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2429         struct mlx5e_cq_decomp *cqd = &rq->cqd;
2430         int work_done = 0;
2431
2432         cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
2433         if (!cqe)
2434                 return work_done;
2435
2436         if (cqd->last_cqe_title &&
2437             (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2438                 rq->stats->cqe_compress_blks++;
2439                 cqd->last_cqe_title = false;
2440         }
2441
2442         do {
2443                 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2444                         if (title_cqe) {
2445                                 mlx5e_read_enhanced_title_slot(rq, title_cqe);
2446                                 title_cqe = NULL;
2447                                 rq->stats->cqe_compress_blks++;
2448                         }
2449                         work_done +=
2450                                 mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2451                                                               budget_rem - work_done);
2452                         continue;
2453                 }
2454                 title_cqe = cqe;
2455                 mlx5_cqwq_pop(cqwq);
2456
2457                 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2458                                 mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2459                                 rq, cqe);
2460                 work_done++;
2461         } while (work_done < budget_rem &&
2462                  (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
2463
2464         /* the last CQE might be the title CQE of the next poll bulk */
2465         if (title_cqe) {
2466                 mlx5e_read_enhanced_title_slot(rq, title_cqe);
2467                 cqd->last_cqe_title = true;
2468         }
2469
2470         return work_done;
2471 }
2472
2473 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2474                                               struct mlx5_cqwq *cqwq,
2475                                               int budget_rem)
2476 {
2477         struct mlx5_cqe64 *cqe;
2478         int work_done = 0;
2479
2480         if (rq->cqd.left)
2481                 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2482
2483         while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2484                 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2485                         work_done +=
2486                                 mlx5e_decompress_cqes_start(rq, cqwq,
2487                                                             budget_rem - work_done);
2488                         continue;
2489                 }
2490
2491                 mlx5_cqwq_pop(cqwq);
2492                 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2493                                 mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2494                                 rq, cqe);
2495                 work_done++;
2496         }
2497
2498         return work_done;
2499 }
2500
2501 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2502 {
2503         struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2504         struct mlx5_cqwq *cqwq = &cq->wq;
2505         int work_done;
2506
2507         if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2508                 return 0;
2509
2510         if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2511                 work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2512                                                                   budget);
2513         else
2514                 work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2515                                                                budget);
2516
2517         if (work_done == 0)
2518                 return 0;
2519
2520         if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2521                 mlx5e_shampo_flush_skb(rq, NULL, false);
2522
2523         if (rcu_access_pointer(rq->xdp_prog))
2524                 mlx5e_xdp_rx_poll_complete(rq);
2525
2526         mlx5_cqwq_update_db_record(cqwq);
2527
2528         /* ensure cq space is freed before enabling more cqes */
2529         wmb();
2530
2531         return work_done;
2532 }
2533
2534 #ifdef CONFIG_MLX5_CORE_IPOIB
2535
2536 #define MLX5_IB_GRH_SGID_OFFSET 8
2537 #define MLX5_IB_GRH_DGID_OFFSET 24
2538 #define MLX5_GID_SIZE           16
2539
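     /* IPoIB RX completion: derive the packet type from the GRH DGID,
      * drop multicast packets the HCA replicated back to their sender,
      * strip the GRH and synthesize the pseudo header the IPoIB netdev
      * expects.
      */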
2540 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2541                                          struct mlx5_cqe64 *cqe,
2542                                          u32 cqe_bcnt,
2543                                          struct sk_buff *skb)
2544 {
2545         struct hwtstamp_config *tstamp;
2546         struct mlx5e_rq_stats *stats;
2547         struct net_device *netdev;
2548         struct mlx5e_priv *priv;
2549         char *pseudo_header;
2550         u32 flags_rqpn;
2551         u32 qpn;
2552         u8 *dgid;
2553         u8 g;
2554
2555         qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2556         netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2557
2558         /* No mapping present, cannot process the SKB. This might happen if a
2559          * child interface goes down while unprocessed CQEs remain on the parent RQ.
2560          */
2561         if (unlikely(!netdev)) {
2562                 /* TODO: add drop counters support */
2563                 skb->dev = NULL;
2564                 pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2565                 return;
2566         }
2567
2568         priv = mlx5i_epriv(netdev);
2569         tstamp = &priv->tstamp;
2570         stats = &priv->channel_stats[rq->ix]->rq;
2571
2572         flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2573         g = (flags_rqpn >> 28) & 3;
2574         dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
2575         if ((!g) || dgid[0] != 0xff)
2576                 skb->pkt_type = PACKET_HOST;
2577         else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2578                 skb->pkt_type = PACKET_BROADCAST;
2579         else
2580                 skb->pkt_type = PACKET_MULTICAST;
2581
2582         /* Drop packets that this interface sent, i.e. multicast packets
2583          * that the HCA has replicated.
2584          */
2585         if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2586             (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2587                     MLX5_GID_SIZE) == 0)) {
2588                 skb->dev = NULL;
2589                 return;
2590         }
2591
2592         skb_pull(skb, MLX5_IB_GRH_BYTES);
2593
2594         skb->protocol = *((__be16 *)(skb->data));
2595
2596         if (netdev->features & NETIF_F_RXCSUM) {
2597                 skb->ip_summed = CHECKSUM_COMPLETE;
2598                 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2599                 stats->csum_complete++;
2600         } else {
2601                 skb->ip_summed = CHECKSUM_NONE;
2602                 stats->csum_none++;
2603         }
2604
2605         if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
2606                 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2607                                                                   rq->clock, get_cqe_ts(cqe));
2608         skb_record_rx_queue(skb, rq->ix);
2609
2610         if (likely(netdev->features & NETIF_F_RXHASH))
2611                 mlx5e_skb_set_hash(cqe, skb);
2612
2613         /* 20 bytes of ipoib pseudo header; the 4-byte encap header already exists */
2614         pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2615         memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2616         skb_reset_mac_header(skb);
2617         skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2618
2619         skb->dev = netdev;
2620
2621         stats->packets++;
2622         stats->bytes += cqe_bcnt;
2623 }
2624
2625 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2626 {
2627         struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2628         struct mlx5e_wqe_frag_info *wi;
2629         struct sk_buff *skb;
2630         u32 cqe_bcnt;
2631         u16 ci;
2632
2633         ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2634         wi       = get_frag(rq, ci);
2635         cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2636
2637         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2638                 rq->stats->wqe_err++;
2639                 goto wq_cyc_pop;
2640         }
2641
2642         skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2643                               mlx5e_skb_from_cqe_linear,
2644                               mlx5e_skb_from_cqe_nonlinear,
2645                               rq, wi, cqe, cqe_bcnt);
2646         if (!skb)
2647                 goto wq_cyc_pop;
2648
2649         mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2650         if (unlikely(!skb->dev)) {
2651                 dev_kfree_skb_any(skb);
2652                 goto wq_cyc_pop;
2653         }
2654         napi_gro_receive(rq->cq.napi, skb);
2655
2656 wq_cyc_pop:
2657         mlx5_wq_cyc_pop(wq);
2658 }
2659
2660 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2661         .handle_rx_cqe       = mlx5i_handle_rx_cqe,
2662         .handle_rx_cqe_mpwqe = NULL, /* Not supported */
2663 };
2664 #endif /* CONFIG_MLX5_CORE_IPOIB */
2665
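     /* Wire up the RQ datapath callbacks based on the WQ type, XSK mode
      * and whether received packets fit a linear skb. The CQE handler
      * itself comes from the profile's rx_handlers table.
      */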
2666 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2667 {
2668         struct net_device *netdev = rq->netdev;
2669         struct mlx5_core_dev *mdev = rq->mdev;
2670         struct mlx5e_priv *priv = rq->priv;
2671
2672         switch (rq->wq_type) {
2673         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2674                 rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2675                         mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2676                         mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2677                                 mlx5e_skb_from_cqe_mpwrq_linear :
2678                                 mlx5e_skb_from_cqe_mpwrq_nonlinear;
2679                 rq->post_wqes = mlx5e_post_rx_mpwqes;
2680                 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2681
2682                 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2683                         rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2684                         if (!rq->handle_rx_cqe) {
2685                                 netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2686                                 return -EINVAL;
2687                         }
2688                 } else {
2689                         rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2690                         if (!rq->handle_rx_cqe) {
2691                                 netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2692                                 return -EINVAL;
2693                         }
2694                 }
2695
2696                 break;
2697         default: /* MLX5_WQ_TYPE_CYCLIC */
2698                 rq->wqe.skb_from_cqe = xsk ?
2699                         mlx5e_xsk_skb_from_cqe_linear :
2700                         mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2701                                 mlx5e_skb_from_cqe_linear :
2702                                 mlx5e_skb_from_cqe_nonlinear;
2703                 rq->post_wqes = mlx5e_post_rx_wqes;
2704                 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2705                 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2706                 if (!rq->handle_rx_cqe) {
2707                         netdev_err(netdev, "RX handler of RQ is not set\n");
2708                         return -EINVAL;
2709                 }
2710         }
2711
2712         return 0;
2713 }
2714
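     /* RX handler of the devlink trap RQ: rebuild the trapped packet
      * including its MAC header, report it to devlink with the trap id
      * taken from the CQE flow tag, then free it.
      */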
2715 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2716 {
2717         struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2718         struct mlx5e_wqe_frag_info *wi;
2719         struct sk_buff *skb;
2720         u32 cqe_bcnt;
2721         u16 trap_id;
2722         u16 ci;
2723
2724         trap_id  = get_cqe_flow_tag(cqe);
2725         ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2726         wi       = get_frag(rq, ci);
2727         cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2728
2729         if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2730                 rq->stats->wqe_err++;
2731                 goto wq_cyc_pop;
2732         }
2733
2734         skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2735         if (!skb)
2736                 goto wq_cyc_pop;
2737
2738         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2739         skb_push(skb, ETH_HLEN);
2740
2741         mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2742                                  rq->netdev->devlink_port);
2743         dev_kfree_skb_any(skb);
2744
2745 wq_cyc_pop:
2746         mlx5_wq_cyc_pop(wq);
2747 }
2748
2749 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2750 {
2751         rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2752                                mlx5e_skb_from_cqe_linear :
2753                                mlx5e_skb_from_cqe_nonlinear;
2754         rq->post_wqes = mlx5e_post_rx_wqes;
2755         rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2756         rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2757 }