1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2019 Netronome Systems, Inc. */
4 #include <linux/bpf_trace.h>
5 #include <linux/netdevice.h>
6 #include <linux/overflow.h>
7 #include <linux/sizes.h>
8 #include <linux/bitfield.h>
10 #include "../nfp_app.h"
11 #include "../nfp_net.h"
12 #include "../nfp_net_dp.h"
13 #include "../crypto/crypto.h"
14 #include "../crypto/fw.h"
/* Return true once at least twice the stop threshold of descriptors is
 * free again, i.e. it is safe to re-wake the netdev TX queue.
 */
static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
	return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
/* Return true when fewer than NFDK_TX_DESC_STOP_CNT free descriptors
 * remain, i.e. the TX queue should be stopped before the next packet.
 */
static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
	return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
/* Stop the netdev TX queue backing @tx_ring, then re-check the wake
 * threshold: a TX completion running outside NAPI may have freed space
 * between our fullness check and the stop, in which case restart the
 * queue immediately to avoid a stall.
 */
static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
				  struct nfp_net_tx_ring *tx_ring)
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
/* Build the LSO companion descriptor for a GSO skb and record per-packet
 * accounting (segment count, on-wire length) in @txbuf.
 * NOTE(review): the return-type line and the tail of this function are not
 * visible in this excerpt — presumably it returns the raw descriptor word;
 * confirm against the full source.
 */
nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
	u32 segs, hdrlen, l3_offset, l4_offset;
	struct nfp_nfdk_tx_desc txd;

	/* Plain TSO: header offsets come from the outer headers */
	if (!skb->encapsulation) {
		l3_offset = skb_network_offset(skb);
		l4_offset = skb_transport_offset(skb);
		hdrlen = skb_tcp_all_headers(skb);
		/* Tunnelled TSO: use the inner header offsets instead */
		l3_offset = skb_inner_network_offset(skb);
		l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_tcp_all_headers(skb);

	segs = skb_shinfo(skb)->gso_segs;
	/* HW MSS field is narrower than gso_size — mask to field width */
	mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;

	/* Note: TSO of the packet with metadata prepended to skb is not
	 * supported yet, in which case l3/l4_offset and lso_hdrlen need
	 * be correctly handled here.
	 *
	 * The driver doesn't have md_bytes easily available at this point.
	 * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
	 * so it needs the full length there. The app MEs might prefer
	 * l3_offset and l4_offset relative to the start of packet data,
	 * but could probably cope with it being relative to the CTM buf
	 */
	txd.l3_offset = l3_offset;
	txd.l4_offset = l4_offset;
	txd.mss = cpu_to_le16(mss);
	txd.lso_hdrlen = hdrlen;
	txd.lso_totsegs = segs;

	txbuf->pkt_cnt = segs;
	/* Total bytes on the wire: header is replicated for every segment
	 * after the first.
	 */
	txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);

	u64_stats_update_begin(&r_vec->tx_sync);
	u64_stats_update_end(&r_vec->tx_sync);
/* Set TX checksum-offload bits in @flags for @skb and bump the per-vector
 * hw_csum_tx counters by @pkt_cnt.  L4 csum is always requested for
 * CHECKSUM_PARTIAL skbs; L3 csum only for IPv4.
 * NOTE(review): return-type line is outside this excerpt — presumably
 * returns the updated @flags; confirm against the full source.
 */
nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
	struct ipv6hdr *ipv6h;

	/* Nothing to do unless HW TX csum offload is enabled */
	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))

	if (skb->ip_summed != CHECKSUM_PARTIAL)

	flags |= NFDK_DESC_TX_L4_CSUM;

	/* For tunnelled packets checksum the inner headers */
	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	/* L3 checksum offloading flag is not required for ipv6 */
	if (iph->version == 4) {
		flags |= NFDK_DESC_TX_L3_CSUM;
	} else if (ipv6h->version != 6) {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);

	u64_stats_update_begin(&r_vec->tx_sync);
	if (!skb->encapsulation) {
		r_vec->hw_csum_tx += pkt_cnt;
		flags |= NFDK_DESC_TX_ENCAP;
		r_vec->hw_csum_tx_inner += pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
/* Work out how many descriptors @skb needs and, if the packet would
 * straddle an NFDK descriptor block boundary (or exceed the per-block
 * data budget), pad the remainder of the current block with NOP
 * descriptors so the packet starts on a fresh block.
 * NOTE(review): return-type line and several branch bodies are missing
 * from this excerpt.
 */
nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
			      unsigned int nr_frags, struct sk_buff *skb)
	unsigned int n_descs, wr_p, nop_slots;
	const skb_frag_t *frag, *fend;
	struct nfp_nfdk_tx_desc *txd;

	/* Descriptors needed for the linear head... */
	n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));

	/* ...plus one per NFDK_TX_MAX_DATA_PER_DESC chunk of each frag */
	frag = skb_shinfo(skb)->frags;
	fend = frag + nr_frags;
	for (; frag < fend; frag++)
		n_descs += DIV_ROUND_UP(skb_frag_size(frag),
					NFDK_TX_MAX_DATA_PER_DESC);

	/* Too many gather descriptors — try to linearize the skb */
	if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
		if (skb_is_nonlinear(skb)) {
			err = skb_linearize(skb);

	/* Under count by 1 (don't count meta) for the round down to work out */
	n_descs += !!skb_is_gso(skb);

	/* Packet would cross a block boundary? */
	if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
	    round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))

	if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)

	/* Pad to the end of the current block with NOP descriptors */
	wr_p = tx_ring->wr_p;
	nop_slots = D_BLOCK_CPL(wr_p);

	wr_idx = D_IDX(tx_ring, wr_p);
	/* NULL skb marks the NOP slots for the completion path */
	tx_ring->ktxbufs[wr_idx].skb = NULL;
	txd = &tx_ring->ktxds[wr_idx];

	memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));

	tx_ring->data_pending = 0;
	tx_ring->wr_p += nop_slots;
	tx_ring->wr_ptr_add += nop_slots;
/* Prepend the HW port-mux port ID (big endian, 4 bytes) to @skb when a
 * metadata_dst of type METADATA_HW_PORT_MUX is attached.  Returns the
 * number of bytes pushed (error/zero paths are truncated in this excerpt).
 */
static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))

	/* Note: Unsupported case when TSO a skb with metadata prepended.
	 * See the comments in `nfp_nfdk_tx_tso` for details.
	 */
	if (unlikely(md_dst && skb_is_gso(skb)))

	/* Make sure the headroom is writable before pushing */
	if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))

	data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
	put_unaligned_be32(md_dst->u.port_info.port_id, data);

	return sizeof(md_dst->u.port_info.port_id);
/* Prepend TX metadata (currently only the port ID field) to @skb and
 * return the descriptor flags describing it (NFDK_DESC_TX_CHAIN_META on
 * success).  The NFDK_META_LEN/NFDK_META_FIELDS header word is pushed in
 * front of the metadata written by nfp_nfdk_prep_port_id().
 * NOTE(review): return-type line and early-exit bodies are truncated here.
 */
nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
		      struct nfp_net_r_vector *r_vec)
	res = nfp_nfdk_prep_port_id(skb);
	if (unlikely(res <= 0))

	meta_id = NFP_NET_META_PORTID;

	if (unlikely(skb_cow_head(skb, sizeof(meta_id))))

	md_bytes += sizeof(meta_id);

	/* Header word: total metadata length + field-type chain */
	meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
		  FIELD_PREP(NFDK_META_FIELDS, meta_id);

	data = skb_push(skb, sizeof(meta_id));
	put_unaligned_be32(meta_id, data);

	return NFDK_DESC_TX_CHAIN_META;
/**
 * nfp_nfdk_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Maps the skb head and frags for DMA, builds the NFDK descriptor chain
 * (head descriptor, gather descriptors, metadata word and, for GSO, an
 * LSO descriptor), then advances the ring write pointer.  On ring-full
 * returns NETDEV_TX_BUSY; on mapping failure unwinds all mappings and
 * drops the packet.
 *
 * Return: NETDEV_TX_OK on success.
 */
netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
	u32 cnt, tmp_dlen, dlen_type = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	const skb_frag_t *frag, *fend;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int real_len, qidx;
	unsigned int dma_len, type;
	struct netdev_queue *nd_q;
	struct nfp_net_dp *dp;
	int nr_frags, wr_idx;

	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(dp->netdev, qidx);

	/* Don't bother counting frags, assume the worst */
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;

	/* Prepend metadata; negative value here is an error code */
	metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
	if (unlikely((int)metadata < 0))

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* Pad with NOPs if the packet would cross a descriptor block */
	if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
	txd = &tx_ring->ktxds[wr_idx];
	txbuf = &tx_ring->ktxbufs[wr_idx];

	/* Choose head descriptor type: TSO > simple (small linear) > gather */
	dma_len = skb_headlen(skb);
	type = NFDK_DESC_TX_TYPE_TSO;
	else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
		type = NFDK_DESC_TX_TYPE_GATHER;

	dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))

	txbuf->dma_addr = dma_addr;

	/* FIELD_PREP() implicitly truncates to chunk */
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr_48b(txd, dma_addr);

	/* starts at bit 0 */
	BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));

	/* Preserve the original dlen_type, this way below the EOP logic
	 * can use dlen_type.
	 */
	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_addr += tmp_dlen + 1;

	/* The rest of the data (if any) will be in larger dma descritors
	 * and is handled with the fragment loop.
	 */
	frag = skb_shinfo(skb)->frags;
	fend = frag + nr_frags;

	while (dma_len > 0) {
		dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);

		txd->dma_len_type = cpu_to_le16(dlen_type);
		nfp_desc_set_dma_addr_48b(txd, dma_addr);

		/* dlen_type also serves as the chunk length consumed */
		dma_len -= dlen_type;
		dma_addr += dlen_type + 1;

		/* Move on to the next fragment */
		dma_len = skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
		if (dma_mapping_error(dp->dev, dma_addr))

		txbuf->dma_addr = dma_addr;

	/* Mark end-of-packet on the last data descriptor written */
	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

	if (!skb_is_gso(skb)) {
		metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
		txd->raw = cpu_to_le64(metadata);

		/* lso desc should be placed after metadata desc */
		(txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
		real_len = txbuf->real_len;
		metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
		txd->raw = cpu_to_le64(metadata);

	/* Sanity check: the descriptors written must fit in one block */
	cnt = txd - tx_ring->ktxds - wr_idx;
	if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
		     round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
		goto err_warn_overflow;

	skb_tx_timestamp(skb);

	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += skb->len;
		tx_ring->data_pending = 0;

	if (nfp_nfdk_tx_ring_should_stop(tx_ring))
		nfp_nfdk_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += cnt;
	if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
		  wr_idx, skb_headlen(skb), nr_frags, cnt);

	/* Error unwind: unmap everything mapped so far.
	 * txbuf pointed to the next-to-use
	 */
	/* first txbuf holds the skb */
	txbuf = &tx_ring->ktxbufs[wr_idx + 1];
	if (txbuf < etxbuf) {
		dma_unmap_single(dp->dev, txbuf->dma_addr,
				 skb_headlen(skb), DMA_TO_DEVICE);

		frag = skb_shinfo(skb)->frags;
		while (etxbuf < txbuf) {
			dma_unmap_page(dp->dev, txbuf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);

	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");

	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
/**
 * nfp_nfdk_tx_complete() - Handled completed TX packets
 * @tx_ring: TX ring structure
 * @budget:  NAPI budget (only used as bool to determine if in NAPI context)
 *
 * Walks from rd_p up to the device read pointer, unmapping DMA buffers,
 * freeing skbs and accounting completed packets/bytes, then wakes the
 * netdev queue if enough space has been freed.
 */
static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	struct nfp_nfdk_tx_buf *ktxbufs;
	struct device *dev = dp->dev;
	struct netdev_queue *nd_q;

	rd_p = tx_ring->rd_p;
	if (tx_ring->wr_p == rd_p)

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
	ktxbufs = tx_ring->ktxbufs;

		const skb_frag_t *frag, *fend;
		unsigned int size, n_descs = 1;
		struct nfp_nfdk_tx_buf *txbuf;

		txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];

		/* NOP padding: skip to the start of the next block */
		n_descs = D_BLOCK_CPL(rd_p);

		/* Unmap head... */
		size = skb_headlen(skb);
		n_descs += nfp_nfdk_headlen_to_segs(size);
		dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);

		/* ...then each fragment */
		frag = skb_shinfo(skb)->frags;
		fend = frag + skb_shinfo(skb)->nr_frags;
		for (; frag < fend; frag++) {
			size = skb_frag_size(frag);
			n_descs += DIV_ROUND_UP(size,
						NFDK_TX_MAX_DATA_PER_DESC);
			dma_unmap_page(dev, txbuf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		if (!skb_is_gso(skb)) {
			done_bytes += skb->len;
			/* GSO: use accounting recorded by nfp_nfdk_tx_tso() */
			done_bytes += txbuf->real_len;
			done_pkts += txbuf->pkt_cnt;

		napi_consume_skb(skb, budget);

	tx_ring->rd_p = rd_p;
	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
543 /* Receive processing */
545 nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
550 frag = napi_alloc_frag(dp->fl_bufsz);
556 page = dev_alloc_page();
559 frag = page_address(page);
562 *dma_addr = nfp_net_dma_map_rx(dp, frag);
563 if (dma_mapping_error(dp->dev, *dma_addr)) {
564 nfp_net_free_frag(frag, dp->xdp_prog);
565 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
/**
 * nfp_nfdk_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:       NFP Net data path struct
 * @rx_ring:  RX ring structure
 * @frag:     page fragment buffer
 * @dma_addr: DMA address of skb mapping
 *
 * Records the buffer in the software ring, writes the freelist descriptor
 * and, once a full batch has accumulated, publishes the new write pointer
 * to the device.
 */
nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
		     struct nfp_net_rx_ring *rx_ring,
		     void *frag, dma_addr_t dma_addr)
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
				  dma_addr + dp->rx_dma_off);

	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
/**
 * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:      NFP Net data path struct
 * @rx_ring: RX ring to fill
 *
 * Hands every pre-allocated buffer (all cnt - 1 usable slots) back to the
 * device freelist via nfp_nfdk_rx_give_one().
 */
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
				    struct nfp_net_rx_ring *rx_ring)
	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				     rx_ring->rxbufs[i].dma_addr);
/**
 * nfp_nfdk_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 *
 * Non-zero when any checksum the device verified did not also report OK,
 * i.e. the "checked" mask and the shifted "ok" mask disagree.
 */
static int nfp_nfdk_rx_csum_has_errors(u16 flags)
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
/**
 * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:    NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd:   Pointer to RX descriptor
 * @meta:  Parsed metadata prepend
 * @skb:   Pointer to SKB
 *
 * Prefers a checksum delivered in the metadata prepend (CHECKSUM_COMPLETE);
 * otherwise derives CHECKSUM_UNNECESSARY levels from the descriptor's
 * outer/inner TCP/UDP csum-ok flags.  Updates the per-vector RX csum stats.
 */
nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))

	/* Metadata carried a checksum — use it directly */
	if (meta->csum_type) {
		skb->ip_summed = meta->csum_type;
		skb->csum = meta->csum;
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_complete++;
		u64_stats_update_end(&r_vec->rx_sync);

	if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
/* Record the RSS hash from the metadata prepend into @meta, mapping the
 * NFP RSS type to the kernel's L3/L4 hash type.  No-op when NETIF_F_RXHASH
 * is disabled.  (Switch/default structure truncated in this excerpt.)
 */
nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		  unsigned int type, __be32 *hash)
	if (!(netdev->features & NETIF_F_RXHASH))

	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		/* Pure L3 hash types */
		meta->hash_type = PKT_HASH_TYPE_L3;
		/* Everything else includes L4 ports */
		meta->hash_type = PKT_HASH_TYPE_L4;

	meta->hash = get_unaligned_be32(hash);
/* Parse the metadata prepend at @data into @meta.  The first 32-bit word
 * is a chain of 4-bit field-type tags consumed LSB-first; each tag is
 * followed by its payload in the metadata area.  @pkt/@pkt_len describe
 * the packet proper (used for TLS resync requests).
 * NOTE(review): loop header, return statements and some field advances are
 * truncated in this excerpt.
 */
nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		    void *data, void *pkt, unsigned int pkt_len, int meta_len)
	meta_info = get_unaligned_be32(data);

	switch (meta_info & NFP_NET_META_FIELD_MASK) {
	case NFP_NET_META_HASH:
		/* Hash tag consumes two 4-bit fields: type + marker */
		meta_info >>= NFP_NET_META_FIELD_SIZE;
		nfp_nfdk_set_hash(netdev, meta,
				  meta_info & NFP_NET_META_FIELD_MASK,

	case NFP_NET_META_MARK:
		meta->mark = get_unaligned_be32(data);

	case NFP_NET_META_PORTID:
		meta->portid = get_unaligned_be32(data);

	case NFP_NET_META_CSUM:
		meta->csum_type = CHECKSUM_COMPLETE;
			(__force __wsum)__get_unaligned_cpu32(data);

	case NFP_NET_META_RESYNC_INFO:
		if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
		data += sizeof(struct nfp_net_tls_resync_req);

		/* Advance to the next field tag */
		meta_info >>= NFP_NET_META_FIELD_SIZE;
/* Drop an RX packet: count the drop (and, when both @skb and @rxbuf are
 * set, a replacement-buffer allocation failure), recycle the buffer back
 * to the freelist and free the skb if one was built.
 */
nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
	u64_stats_update_begin(&r_vec->rx_sync);

	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* skb is build based on the frag, free_skb() would free the frag
	 * so to be able to reuse it we need an extra ref.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
		nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
		dev_kfree_skb_any(skb);
/* Reclaim completed XDP-TX descriptors, recycling their buffers straight
 * back to the paired RX ring.  Returns false (budget exhausted) when more
 * than NFP_NET_XDP_MAX_COMPLETE completions were pending.
 */
static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_rx_ring *rx_ring;
	u32 qcp_rd_p, done = 0;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
	if (qcp_rd_p == tx_ring->qcp_rd_p)

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	/* Cap the work per invocation */
	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	rx_ring = r_vec->rx_ring;
		int idx = D_IDX(tx_ring, tx_ring->rd_p + done);
		struct nfp_nfdk_tx_buf *txbuf;
		unsigned int step = 1;

		txbuf = &tx_ring->ktxbufs[idx];

		if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) {
			WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n");

		/* Two successive txbufs are used to stash virtual and dma
		 * address respectively, recycle and clean them here.
		 */
		nfp_nfdk_rx_give_one(dp, rx_ring,
				     (void *)NFDK_TX_BUF_PTR(txbuf[0].val),

	u64_stats_update_begin(&r_vec->tx_sync);
	/* Note: tx_bytes not accumulated. */
	u64_stats_update_end(&r_vec->tx_sync);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done);
	tx_ring->rd_p += done;

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
/* Transmit an XDP_TX packet by re-using the RX buffer in place: the RX
 * frag/dma mapping is stashed in a pair of txbufs, the data is synced for
 * device, and NFDK head/gather descriptors are written.  Mirrors the
 * block-boundary NOP-padding logic of the regular TX path.
 * NOTE(review): return-type line and some branch bodies are truncated in
 * this excerpt.
 */
nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		    struct nfp_net_tx_ring *tx_ring,
		    struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		    unsigned int pkt_len, bool *completed)
	unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
	unsigned int dma_len, type, cnt, dlen_type, tmp_dlen;
	struct nfp_nfdk_tx_buf *txbuf;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int n_descs;

	/* Reject if xdp_adjust_tail grow packet beyond DMA area */
	if (pkt_len + dma_off > dma_map_sz)

	/* Make sure there's still at least one block available after
	 * aligning to block boundary, so that the txds used below
	 * won't wrap around the tx_ring.
	 */
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		/* Try to reclaim completed descriptors first */
		nfp_nfdk_xdp_complete(tx_ring);

		if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
			nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,

	/* Check if cross block boundary */
	n_descs = nfp_nfdk_headlen_to_segs(pkt_len);
	if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
	     round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) ||
	    ((u32)tx_ring->data_pending + pkt_len >
	     NFDK_TX_MAX_DATA_PER_BLOCK)) {
		/* Pad the rest of the block with NOP descriptors */
		unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p);

		wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
		txd = &tx_ring->ktxds[wr_idx];
		       array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));

		tx_ring->data_pending = 0;
		tx_ring->wr_p += nop_slots;
		tx_ring->wr_ptr_add += nop_slots;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	txbuf = &tx_ring->ktxbufs[wr_idx];

	/* txbuf[0] stashes the frag pointer (tagged SOP), txbuf[1] the dma */
	txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP;
	txbuf[1].dma_addr = rxbuf->dma_addr;
	/* Note: pkt len not stored */

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->ktxds[wr_idx];

	dma_addr = rxbuf->dma_addr + dma_off;

	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
		type = NFDK_DESC_TX_TYPE_GATHER;

	/* FIELD_PREP() implicitly truncates to chunk */
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr_48b(txd, dma_addr);

	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_addr += tmp_dlen + 1;

	/* Gather descriptors for the remainder of the data */
	while (dma_len > 0) {
		dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
		txd->dma_len_type = cpu_to_le16(dlen_type);
		nfp_desc_set_dma_addr_48b(txd, dma_addr);

		dlen_type &= NFDK_DESC_TX_DMA_LEN;
		dma_len -= dlen_type;
		dma_addr += dlen_type + 1;

	/* Mark end-of-packet on the last data descriptor */
	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

	cnt = txd - tx_ring->ktxds - wr_idx;
	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += pkt_len;
		tx_ring->data_pending = 0;

	tx_ring->wr_ptr_add += cnt;
/**
 * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
 * @budget:  NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Per packet: read the descriptor, locate metadata and payload, run the
 * XDP program if attached (possibly XDP_TX via the paired TX ring),
 * resolve the destination netdev from the port-ID metadata, then build
 * an skb, refill the freelist slot and hand the skb to the stack.
 *
 * Return: Number of packets received.
 */
static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	/* With XDP attached buffers are whole pages */
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */

		memset(&meta, 0, sizeof(meta));

		rxbuf = &rx_ring->rxbufs[idx];
		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		/* Reject metadata larger than the prepend area */
		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,

		if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
						 rxbuf->frag + meta_off,
						 rxbuf->frag + pkt_off,
						 pkt_len, meta_len))) {
			nn_dp_warn(dp, "invalid RX packet metadata\n");
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,

		/* Run XDP only for plain (non port-muxed) packets */
		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			unsigned int dma_off;

			xdp_prepare_buff(&xdp,
					 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
					 pkt_off - NFP_NET_RX_BUF_HEADROOM,

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			/* Program may have adjusted head/tail */
			pkt_len = xdp.data_end - xdp.data;
			pkt_off += xdp.data - orig_data;

				meta_len_xdp = xdp.data - xdp.data_meta;

				dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
				if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring,
					trace_xdp_exception(dp->netdev,
				bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,

		/* Resolve the destination netdev from the port ID */
		if (likely(!meta.portid)) {
			netdev = dp->netdev;
		} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
			struct nfp_net *nn = netdev_priv(dp->netdev);

			/* Control messages bypass the stack entirely */
			nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
			nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,

			nn = netdev_priv(dp->netdev);
			netdev = nfp_app_dev_get(nn->app, meta.portid,
			if (unlikely(!netdev)) {
				nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,

			if (nfp_netdev_is_nfp_repr(netdev))
				nfp_repr_inc_rx_stats(netdev, pkt_len);

		skb = build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		/* Replace the consumed buffer before giving up the old one */
		new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, netdev);

		nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

			skb_metadata_set(skb, meta_len_xdp);

		if (likely(!redir_egress)) {
			napi_gro_receive(&rx_ring->r_vec->napi, skb);
			/* Egress redirect: push L2 back on and re-transmit */
			skb_reset_network_header(skb);
			__skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);

	/* Flush any pending XDP-TX work, or reclaim completions */
	if (tx_ring->wr_ptr_add)
		nfp_net_tx_xmit_more_flush(tx_ring);
	else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
		if (!nfp_nfdk_xdp_complete(tx_ring))
			pkts_polled = budget;
/**
 * nfp_nfdk_poll() - napi poll function
 * @napi:   NAPI structure
 * @budget: NAPI budget
 *
 * Completes TX work, receives up to @budget packets, re-arms the IRQ when
 * the budget was not exhausted, and feeds packet/byte samples to net_dim
 * for adaptive RX/TX interrupt coalescing when enabled.
 *
 * Return: number of packets polled.
 */
int nfp_nfdk_poll(struct napi_struct *napi, int budget)
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	nfp_nfdk_tx_complete(r_vec->tx_ring, budget);

	pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
		struct dim_sample dim_sample = {};

		/* Snapshot RX stats consistently for the DIM sample */
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			pkts = r_vec->rx_pkts;
			bytes = r_vec->rx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->rx_dim, dim_sample);

	if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
		struct dim_sample dim_sample = {};

		/* Snapshot TX stats consistently for the DIM sample */
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			pkts = r_vec->tx_pkts;
			bytes = r_vec->tx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->tx_dim, dim_sample);
1255 /* Control device data path
/* Transmit one control-channel skb on the vector's TX ring.  Linear skbs
 * only (no gather).  When the ring is full the skb is queued (tail for new
 * messages, head when @old indicates a requeue) and NETDEV_TX_BUSY is
 * returned so the caller retries later.  Mirrors the data-path descriptor
 * build but with a fixed PORTID metadata prepend.
 * NOTE(review): return-type line and several branch bodies are truncated
 * in this excerpt.
 */
nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		     struct sk_buff *skb, bool old)
	u32 cnt, tmp_dlen, dlen_type = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_nfdk_tx_buf *txbuf;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int dma_len, type;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;

	dp = &r_vec->nfp_net->dp;
	tx_ring = r_vec->tx_ring;

	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");

	/* Don't bother counting frags, assume the worst */
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		u64_stats_update_begin(&r_vec->tx_sync);
		u64_stats_update_end(&r_vec->tx_sync);

		/* Requeued messages go back to the head to keep ordering */
			__skb_queue_tail(&r_vec->queue, skb);
			__skb_queue_head(&r_vec->queue, skb);
		return NETDEV_TX_BUSY;

	if (nfp_app_ctrl_has_meta(nn->app)) {
		if (unlikely(skb_headroom(skb) < 8)) {
			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
		/* 8 bytes: metadata header word + CTRL port ID */
		metadata = NFDK_DESC_TX_CHAIN_META;
		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
		put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
				   FIELD_PREP(NFDK_META_FIELDS,
					      NFP_NET_META_PORTID),

	if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
	txd = &tx_ring->ktxds[wr_idx];
	txbuf = &tx_ring->ktxbufs[wr_idx];

	dma_len = skb_headlen(skb);
	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
		type = NFDK_DESC_TX_TYPE_GATHER;

	dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))

	txbuf->dma_addr = dma_addr;

	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr_48b(txd, dma_addr);

	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_len -= tmp_dlen;
	dma_addr += tmp_dlen + 1;

	/* Gather descriptors for any remainder beyond the head chunk */
	while (dma_len > 0) {
		dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
		txd->dma_len_type = cpu_to_le16(dlen_type);
		nfp_desc_set_dma_addr_48b(txd, dma_addr);

		dlen_type &= NFDK_DESC_TX_DMA_LEN;
		dma_len -= dlen_type;
		dma_addr += dlen_type + 1;

	/* Mark end-of-packet, then write the metadata descriptor */
	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

	txd->raw = cpu_to_le64(metadata);

	/* Sanity check: must fit within one descriptor block */
	cnt = txd - tx_ring->ktxds - wr_idx;
	if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
		     round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
		goto err_warn_overflow;

	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += skb->len;
		tx_ring->data_pending = 0;

	tx_ring->wr_ptr_add += cnt;
	nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

	WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
		  wr_idx, skb_headlen(skb), 0, cnt);
	dma_unmap_single(dp->dev, txbuf->dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");

	u64_stats_update_begin(&r_vec->tx_sync);
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
/* Drain the vector's queued control messages, re-submitting each via
 * nfp_nfdk_ctrl_tx_one(old=true); stops on the first BUSY return (that
 * skb is re-queued at the head by the callee).
 */
static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&r_vec->queue)))
		if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
/* Validate the metadata prepend of a received control message: when the
 * app uses metadata it must be exactly a PORTID field carrying the CTRL
 * port tag.
 */
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
	u32 meta_type, meta_tag;

	/* Apps without metadata need no validation */
	if (!nfp_app_ctrl_has_meta(nn->app))

	meta_type = get_unaligned_be32(data);
	meta_tag = get_unaligned_be32(data + 4);

	return (meta_type == NFP_NET_META_PORTID &&
		meta_tag == NFP_META_PORT_ID_CTRL);
/* Receive a single control message from @rx_ring: validate the metadata,
 * build an skb, replace the RX buffer and hand the message to the app via
 * nfp_app_ctrl_rx().  Follows the same descriptor/offset layout as the
 * data-path RX (see nfp_nfdk_rx()).
 * NOTE(review): return-type line and early-return bodies are truncated in
 * this excerpt.
 */
nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
	struct nfp_net_rx_buf *rxbuf;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	struct sk_buff *skb;

	idx = D_IDX(rx_ring, rx_ring->rd_p);

	rxd = &rx_ring->rxds[idx];
	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))

	/* Memory barrier to ensure that we won't do other reads
	 * before the DD bit.
	 */

	rxbuf = &rx_ring->rxbufs[idx];
	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
	data_len = le16_to_cpu(rxd->rxd.data_len);
	pkt_len = data_len - meta_len;

	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		pkt_off += meta_len;
		pkt_off += dp->rx_offset;
	meta_off = pkt_off - meta_len;

	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_bytes += pkt_len;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);

	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
	if (unlikely(!skb)) {
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
	/* Replace the consumed buffer before releasing the old one */
	new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
	if (unlikely(!new_frag)) {
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);

	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

	nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

	skb_reserve(skb, pkt_off);
	skb_put(skb, pkt_len);

	nfp_app_ctrl_rx(nn->app, skb);
/* Receive control messages until the ring is empty or the fixed budget of
 * 512 is exhausted; the return value (truncated here) lets the caller
 * decide between re-arming the IRQ and re-scheduling the tasklet.
 */
static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct nfp_net_dp *dp = &nn->dp;
	unsigned int budget = 512;

	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
1507 void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
1509 struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
1511 spin_lock(&r_vec->lock);
1512 nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
1513 __nfp_ctrl_tx_queued(r_vec);
1514 spin_unlock(&r_vec->lock);
1516 if (nfp_ctrl_rx(r_vec)) {
1517 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1519 tasklet_schedule(&r_vec->tasklet);
1520 nn_dp_warn(&r_vec->nfp_net->dp,
1521 "control message budget exceeded!\n");